repo_name
string
path
string
copies
string
size
string
content
string
license
string
lcrponte/android_kernel_msm
drivers/input/misc/sparcspkr.c
8173
8218
/* * Driver for PC-speaker like devices found on various Sparc systems. * * Copyright (c) 2002 Vojtech Pavlik * Copyright (c) 2002, 2006, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/of_device.h> #include <linux/slab.h> #include <asm/io.h> MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("Sparc Speaker beeper driver"); MODULE_LICENSE("GPL"); struct grover_beep_info { void __iomem *freq_regs; void __iomem *enable_reg; }; struct bbc_beep_info { u32 clock_freq; void __iomem *regs; }; struct sparcspkr_state { const char *name; int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); spinlock_t lock; struct input_dev *input_dev; union { struct grover_beep_info grover; struct bbc_beep_info bbc; } u; }; static u32 bbc_count_to_reg(struct bbc_beep_info *info, unsigned int count) { u32 val, clock_freq = info->clock_freq; int i; if (!count) return 0; if (count <= clock_freq >> 20) return 1 << 18; if (count >= clock_freq >> 12) return 1 << 10; val = 1 << 18; for (i = 19; i >= 11; i--) { val >>= 1; if (count <= clock_freq >> i) break; } return val; } static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent); struct bbc_beep_info *info = &state->u.bbc; unsigned int count = 0; unsigned long flags; if (type != EV_SND) return -1; switch (code) { case SND_BELL: if (value) value = 1000; case SND_TONE: break; default: return -1; } if (value > 20 && value < 32767) count = 1193182 / value; count = bbc_count_to_reg(info, count); spin_lock_irqsave(&state->lock, flags); if (count) { outb(0x01, info->regs + 0); outb(0x00, info->regs + 2); outb((count >> 16) & 0xff, info->regs + 3); outb((count >> 8) & 0xff, info->regs + 4); outb(0x00, info->regs + 5); } else { outb(0x00, info->regs + 0); } 
spin_unlock_irqrestore(&state->lock, flags); return 0; } static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent); struct grover_beep_info *info = &state->u.grover; unsigned int count = 0; unsigned long flags; if (type != EV_SND) return -1; switch (code) { case SND_BELL: if (value) value = 1000; case SND_TONE: break; default: return -1; } if (value > 20 && value < 32767) count = 1193182 / value; spin_lock_irqsave(&state->lock, flags); if (count) { /* enable counter 2 */ outb(inb(info->enable_reg) | 3, info->enable_reg); /* set command for counter 2, 2 byte write */ outb(0xB6, info->freq_regs + 1); /* select desired HZ */ outb(count & 0xff, info->freq_regs + 0); outb((count >> 8) & 0xff, info->freq_regs + 0); } else { /* disable counter 2 */ outb(inb_p(info->enable_reg) & 0xFC, info->enable_reg); } spin_unlock_irqrestore(&state->lock, flags); return 0; } static int __devinit sparcspkr_probe(struct device *dev) { struct sparcspkr_state *state = dev_get_drvdata(dev); struct input_dev *input_dev; int error; input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; input_dev->name = state->name; input_dev->phys = "sparc/input0"; input_dev->id.bustype = BUS_ISA; input_dev->id.vendor = 0x001f; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = dev; input_dev->evbit[0] = BIT_MASK(EV_SND); input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE); input_dev->event = state->event; error = input_register_device(input_dev); if (error) { input_free_device(input_dev); return error; } state->input_dev = input_dev; return 0; } static void sparcspkr_shutdown(struct platform_device *dev) { struct sparcspkr_state *state = dev_get_drvdata(&dev->dev); struct input_dev *input_dev = state->input_dev; /* turn off the speaker */ state->event(input_dev, EV_SND, SND_BELL, 0); } static int __devinit bbc_beep_probe(struct 
platform_device *op) { struct sparcspkr_state *state; struct bbc_beep_info *info; struct device_node *dp; int err = -ENOMEM; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) goto out_err; state->name = "Sparc BBC Speaker"; state->event = bbc_spkr_event; spin_lock_init(&state->lock); dp = of_find_node_by_path("/"); err = -ENODEV; if (!dp) goto out_free; info = &state->u.bbc; info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0); if (!info->clock_freq) goto out_free; info->regs = of_ioremap(&op->resource[0], 0, 6, "bbc beep"); if (!info->regs) goto out_free; dev_set_drvdata(&op->dev, state); err = sparcspkr_probe(&op->dev); if (err) goto out_clear_drvdata; return 0; out_clear_drvdata: dev_set_drvdata(&op->dev, NULL); of_iounmap(&op->resource[0], info->regs, 6); out_free: kfree(state); out_err: return err; } static int __devexit bbc_remove(struct platform_device *op) { struct sparcspkr_state *state = dev_get_drvdata(&op->dev); struct input_dev *input_dev = state->input_dev; struct bbc_beep_info *info = &state->u.bbc; /* turn off the speaker */ state->event(input_dev, EV_SND, SND_BELL, 0); input_unregister_device(input_dev); of_iounmap(&op->resource[0], info->regs, 6); dev_set_drvdata(&op->dev, NULL); kfree(state); return 0; } static const struct of_device_id bbc_beep_match[] = { { .name = "beep", .compatible = "SUNW,bbc-beep", }, {}, }; static struct platform_driver bbc_beep_driver = { .driver = { .name = "bbcbeep", .owner = THIS_MODULE, .of_match_table = bbc_beep_match, }, .probe = bbc_beep_probe, .remove = __devexit_p(bbc_remove), .shutdown = sparcspkr_shutdown, }; static int __devinit grover_beep_probe(struct platform_device *op) { struct sparcspkr_state *state; struct grover_beep_info *info; int err = -ENOMEM; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) goto out_err; state->name = "Sparc Grover Speaker"; state->event = grover_spkr_event; spin_lock_init(&state->lock); info = &state->u.grover; info->freq_regs = 
of_ioremap(&op->resource[2], 0, 2, "grover beep freq"); if (!info->freq_regs) goto out_free; info->enable_reg = of_ioremap(&op->resource[3], 0, 1, "grover beep enable"); if (!info->enable_reg) goto out_unmap_freq_regs; dev_set_drvdata(&op->dev, state); err = sparcspkr_probe(&op->dev); if (err) goto out_clear_drvdata; return 0; out_clear_drvdata: dev_set_drvdata(&op->dev, NULL); of_iounmap(&op->resource[3], info->enable_reg, 1); out_unmap_freq_regs: of_iounmap(&op->resource[2], info->freq_regs, 2); out_free: kfree(state); out_err: return err; } static int __devexit grover_remove(struct platform_device *op) { struct sparcspkr_state *state = dev_get_drvdata(&op->dev); struct grover_beep_info *info = &state->u.grover; struct input_dev *input_dev = state->input_dev; /* turn off the speaker */ state->event(input_dev, EV_SND, SND_BELL, 0); input_unregister_device(input_dev); of_iounmap(&op->resource[3], info->enable_reg, 1); of_iounmap(&op->resource[2], info->freq_regs, 2); dev_set_drvdata(&op->dev, NULL); kfree(state); return 0; } static const struct of_device_id grover_beep_match[] = { { .name = "beep", .compatible = "SUNW,smbus-beep", }, {}, }; static struct platform_driver grover_beep_driver = { .driver = { .name = "groverbeep", .owner = THIS_MODULE, .of_match_table = grover_beep_match, }, .probe = grover_beep_probe, .remove = __devexit_p(grover_remove), .shutdown = sparcspkr_shutdown, }; static int __init sparcspkr_init(void) { int err = platform_driver_register(&bbc_beep_driver); if (!err) { err = platform_driver_register(&grover_beep_driver); if (err) platform_driver_unregister(&bbc_beep_driver); } return err; } static void __exit sparcspkr_exit(void) { platform_driver_unregister(&bbc_beep_driver); platform_driver_unregister(&grover_beep_driver); } module_init(sparcspkr_init); module_exit(sparcspkr_exit);
gpl-2.0
yank555-lu/N3-KK-Sourcedrops
net/irda/irias_object.c
12525
13698
/********************************************************************* * * Filename: irias_object.c * Version: 0.3 * Description: IAS object database and functions * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Thu Oct 1 22:50:04 1998 * Modified at: Wed Dec 15 11:23:16 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/slab.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/module.h> #include <net/irda/irda.h> #include <net/irda/irias_object.h> hashbin_t *irias_objects; /* * Used when a missing value needs to be returned */ struct ias_value irias_missing = { IAS_MISSING, 0, 0, 0, {0}}; /* * Function ias_new_object (name, id) * * Create a new IAS object * */ struct ias_object *irias_new_object( char *name, int id) { struct ias_object *obj; IRDA_DEBUG( 4, "%s()\n", __func__); obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); if (obj == NULL) { IRDA_WARNING("%s(), Unable to allocate object!\n", __func__); return NULL; } obj->magic = IAS_OBJECT_MAGIC; obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC); if (!obj->name) { IRDA_WARNING("%s(), Unable to allocate name!\n", __func__); kfree(obj); return NULL; } obj->id = id; /* Locking notes : the attrib spinlock has lower precendence * than the objects spinlock. Never grap the objects spinlock * while holding any attrib spinlock (risk of deadlock). 
Jean II */ obj->attribs = hashbin_new(HB_LOCK); if (obj->attribs == NULL) { IRDA_WARNING("%s(), Unable to allocate attribs!\n", __func__); kfree(obj->name); kfree(obj); return NULL; } return obj; } EXPORT_SYMBOL(irias_new_object); /* * Function irias_delete_attrib (attrib) * * Delete given attribute and deallocate all its memory * */ static void __irias_delete_attrib(struct ias_attrib *attrib) { IRDA_ASSERT(attrib != NULL, return;); IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC, return;); kfree(attrib->name); irias_delete_value(attrib->value); attrib->magic = ~IAS_ATTRIB_MAGIC; kfree(attrib); } void __irias_delete_object(struct ias_object *obj) { IRDA_ASSERT(obj != NULL, return;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); kfree(obj->name); hashbin_delete(obj->attribs, (FREE_FUNC) __irias_delete_attrib); obj->magic = ~IAS_OBJECT_MAGIC; kfree(obj); } /* * Function irias_delete_object (obj) * * Remove object from hashbin and deallocate all attributes associated with * with this object and the object itself * */ int irias_delete_object(struct ias_object *obj) { struct ias_object *node; IRDA_ASSERT(obj != NULL, return -1;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -1;); /* Remove from list */ node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj); if (!node) IRDA_DEBUG( 0, "%s(), object already removed!\n", __func__); /* Destroy */ __irias_delete_object(obj); return 0; } EXPORT_SYMBOL(irias_delete_object); /* * Function irias_delete_attrib (obj) * * Remove attribute from hashbin and, if it was the last attribute of * the object, remove the object as well. 
* */ int irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib, int cleanobject) { struct ias_attrib *node; IRDA_ASSERT(obj != NULL, return -1;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -1;); IRDA_ASSERT(attrib != NULL, return -1;); /* Remove attribute from object */ node = hashbin_remove_this(obj->attribs, (irda_queue_t *) attrib); if (!node) return 0; /* Already removed or non-existent */ /* Deallocate attribute */ __irias_delete_attrib(node); /* Check if object has still some attributes, destroy it if none. * At first glance, this look dangerous, as the kernel reference * various IAS objects. However, we only use this function on * user attributes, not kernel attributes, so there is no risk * of deleting a kernel object this way. Jean II */ node = (struct ias_attrib *) hashbin_get_first(obj->attribs); if (cleanobject && !node) irias_delete_object(obj); return 0; } /* * Function irias_insert_object (obj) * * Insert an object into the LM-IAS database * */ void irias_insert_object(struct ias_object *obj) { IRDA_ASSERT(obj != NULL, return;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); hashbin_insert(irias_objects, (irda_queue_t *) obj, 0, obj->name); } EXPORT_SYMBOL(irias_insert_object); /* * Function irias_find_object (name) * * Find object with given name * */ struct ias_object *irias_find_object(char *name) { IRDA_ASSERT(name != NULL, return NULL;); /* Unsafe (locking), object might change */ return hashbin_lock_find(irias_objects, 0, name); } EXPORT_SYMBOL(irias_find_object); /* * Function irias_find_attrib (obj, name) * * Find named attribute in object * */ struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name) { struct ias_attrib *attrib; IRDA_ASSERT(obj != NULL, return NULL;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return NULL;); IRDA_ASSERT(name != NULL, return NULL;); attrib = hashbin_lock_find(obj->attribs, 0, name); if (attrib == NULL) return NULL; /* Unsafe (locking), attrib might change */ 
return attrib; } /* * Function irias_add_attribute (obj, attrib) * * Add attribute to object * */ static void irias_add_attrib(struct ias_object *obj, struct ias_attrib *attrib, int owner) { IRDA_ASSERT(obj != NULL, return;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); IRDA_ASSERT(attrib != NULL, return;); IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC, return;); /* Set if attrib is owned by kernel or user space */ attrib->value->owner = owner; hashbin_insert(obj->attribs, (irda_queue_t *) attrib, 0, attrib->name); } /* * Function irias_object_change_attribute (obj_name, attrib_name, new_value) * * Change the value of an objects attribute. * */ int irias_object_change_attribute(char *obj_name, char *attrib_name, struct ias_value *new_value) { struct ias_object *obj; struct ias_attrib *attrib; unsigned long flags; /* Find object */ obj = hashbin_lock_find(irias_objects, 0, obj_name); if (obj == NULL) { IRDA_WARNING("%s: Unable to find object: %s\n", __func__, obj_name); return -1; } /* Slightly unsafe (obj might get removed under us) */ spin_lock_irqsave(&obj->attribs->hb_spinlock, flags); /* Find attribute */ attrib = hashbin_find(obj->attribs, 0, attrib_name); if (attrib == NULL) { IRDA_WARNING("%s: Unable to find attribute: %s\n", __func__, attrib_name); spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); return -1; } if ( attrib->value->type != new_value->type) { IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n", __func__); spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); return -1; } /* Delete old value */ irias_delete_value(attrib->value); /* Insert new value */ attrib->value = new_value; /* Success */ spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); return 0; } EXPORT_SYMBOL(irias_object_change_attribute); /* * Function irias_object_add_integer_attrib (obj, name, value) * * Add an integer attribute to an LM-IAS object * */ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, int owner) { 
struct ias_attrib *attrib; IRDA_ASSERT(obj != NULL, return;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); IRDA_ASSERT(name != NULL, return;); attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __func__); return; } attrib->magic = IAS_ATTRIB_MAGIC; attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC); /* Insert value */ attrib->value = irias_new_integer_value(value); if (!attrib->name || !attrib->value) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); kfree(attrib); return; } irias_add_attrib(obj, attrib, owner); } EXPORT_SYMBOL(irias_add_integer_attrib); /* * Function irias_add_octseq_attrib (obj, name, octet_seq, len) * * Add a octet sequence attribute to an LM-IAS object * */ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, int len, int owner) { struct ias_attrib *attrib; IRDA_ASSERT(obj != NULL, return;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); IRDA_ASSERT(name != NULL, return;); IRDA_ASSERT(octets != NULL, return;); attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __func__); return; } attrib->magic = IAS_ATTRIB_MAGIC; attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC); attrib->value = irias_new_octseq_value( octets, len); if (!attrib->name || !attrib->value) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); kfree(attrib); return; } irias_add_attrib(obj, attrib, owner); } EXPORT_SYMBOL(irias_add_octseq_attrib); /* * Function irias_object_add_string_attrib (obj, string) * * Add a string attribute to an LM-IAS object * */ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, int owner) { struct ias_attrib *attrib; IRDA_ASSERT(obj 
!= NULL, return;); IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); IRDA_ASSERT(name != NULL, return;); IRDA_ASSERT(value != NULL, return;); attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __func__); return; } attrib->magic = IAS_ATTRIB_MAGIC; attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC); attrib->value = irias_new_string_value(value); if (!attrib->name || !attrib->value) { IRDA_WARNING("%s: Unable to allocate attribute!\n", __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); kfree(attrib); return; } irias_add_attrib(obj, attrib, owner); } EXPORT_SYMBOL(irias_add_string_attrib); /* * Function irias_new_integer_value (integer) * * Create new IAS integer value * */ struct ias_value *irias_new_integer_value(int integer) { struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); return NULL; } value->type = IAS_INTEGER; value->len = 4; value->t.integer = integer; return value; } EXPORT_SYMBOL(irias_new_integer_value); /* * Function irias_new_string_value (string) * * Create new IAS string value * * Per IrLMP 1.1, 4.3.3.2, strings are up to 256 chars - Jean II */ struct ias_value *irias_new_string_value(char *string) { struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); return NULL; } value->type = IAS_STRING; value->charset = CS_ASCII; value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC); if (!value->t.string) { IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); kfree(value); return NULL; } value->len = strlen(value->t.string); return value; } /* * Function irias_new_octseq_value (octets, len) * * Create new IAS octet-sequence value * * Per IrLMP 1.1, 4.3.3.2, octet-sequence are up to 1024 bytes - Jean II */ struct ias_value 
*irias_new_octseq_value(__u8 *octseq , int len) { struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); return NULL; } value->type = IAS_OCT_SEQ; /* Check length */ if(len > IAS_MAX_OCTET_STRING) len = IAS_MAX_OCTET_STRING; value->len = len; value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC); if (value->t.oct_seq == NULL){ IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); kfree(value); return NULL; } return value; } struct ias_value *irias_new_missing_value(void) { struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); if (value == NULL) { IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); return NULL; } value->type = IAS_MISSING; return value; } /* * Function irias_delete_value (value) * * Delete IAS value * */ void irias_delete_value(struct ias_value *value) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(value != NULL, return;); switch (value->type) { case IAS_INTEGER: /* Fallthrough */ case IAS_MISSING: /* No need to deallocate */ break; case IAS_STRING: /* Deallocate string */ kfree(value->t.string); break; case IAS_OCT_SEQ: /* Deallocate byte stream */ kfree(value->t.oct_seq); break; default: IRDA_DEBUG(0, "%s(), Unknown value type!\n", __func__); break; } kfree(value); } EXPORT_SYMBOL(irias_delete_value);
gpl-2.0
shakalaca/ASUS_ZenFone_ZD551KL
kernel/arch/sh/drivers/pci/ops-dreamcast.c
13805
2641
/* * PCI operations for the Sega Dreamcast * * Copyright (C) 2001, 2002 M. R. Brown * Copyright (C) 2002, 2003 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/io.h> #include <mach/pci.h> /* * The !gapspci_config_access case really shouldn't happen, ever, unless * someone implicitly messes around with the last devfn value.. otherwise we * only support a single device anyways, and if we didn't have a BBA, we * wouldn't make it terribly far through the PCI setup anyways. * * Also, we could very easily support both Type 0 and Type 1 configurations * here, but since it doesn't seem that there is any such implementation in * existence, we don't bother. * * I suppose if someone actually gets around to ripping the chip out of * the BBA and hanging some more devices off of it, then this might be * something to take into consideration. However, due to the cost of the BBA, * and the general lack of activity by DC hardware hackers, this doesn't seem * likely to happen anytime soon. */ static int gapspci_config_access(unsigned char bus, unsigned int devfn) { return (bus == 0) && (devfn == 0); } /* * We can also actually read and write in b/w/l sizes! Thankfully this part * was at least done right, and we don't have to do the stupid masking and * shifting that we do on the 7751! Small wonders never cease to amaze. 
*/ static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { *val = 0xffffffff; if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break; case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break; case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break; case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break; case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops gapspci_pci_ops = { .read = gapspci_read, .write = gapspci_write, };
gpl-2.0
arjen75/icecold-kernel
arch/sh/drivers/pci/ops-dreamcast.c
13805
2641
/* * PCI operations for the Sega Dreamcast * * Copyright (C) 2001, 2002 M. R. Brown * Copyright (C) 2002, 2003 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/io.h> #include <mach/pci.h> /* * The !gapspci_config_access case really shouldn't happen, ever, unless * someone implicitly messes around with the last devfn value.. otherwise we * only support a single device anyways, and if we didn't have a BBA, we * wouldn't make it terribly far through the PCI setup anyways. * * Also, we could very easily support both Type 0 and Type 1 configurations * here, but since it doesn't seem that there is any such implementation in * existence, we don't bother. * * I suppose if someone actually gets around to ripping the chip out of * the BBA and hanging some more devices off of it, then this might be * something to take into consideration. However, due to the cost of the BBA, * and the general lack of activity by DC hardware hackers, this doesn't seem * likely to happen anytime soon. */ static int gapspci_config_access(unsigned char bus, unsigned int devfn) { return (bus == 0) && (devfn == 0); } /* * We can also actually read and write in b/w/l sizes! Thankfully this part * was at least done right, and we don't have to do the stupid masking and * shifting that we do on the 7751! Small wonders never cease to amaze. 
*/ static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { *val = 0xffffffff; if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break; case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break; case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { if (!gapspci_config_access(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break; case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break; case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops gapspci_pci_ops = { .read = gapspci_read, .write = gapspci_write, };
gpl-2.0
Vassilko/lichee
drivers/pci/remove.c
1518
4520
#include <linux/pci.h> #include <linux/module.h> #include <linux/pci-aspm.h> #include "pci.h" static void pci_free_resources(struct pci_dev *dev) { int i; msi_remove_pci_irq_vectors(dev); pci_cleanup_rom(dev); for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *res = dev->resource + i; if (res->parent) release_resource(res); } } static void pci_stop_dev(struct pci_dev *dev) { pci_pme_active(dev, false); if (dev->is_added) { pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); device_unregister(&dev->dev); dev->is_added = 0; } if (dev->bus->self) pcie_aspm_exit_link_state(dev); } static void pci_destroy_dev(struct pci_dev *dev) { /* Remove the device from the device lists, and prevent any further * list accesses from this device */ down_write(&pci_bus_sem); list_del(&dev->bus_list); dev->bus_list.next = dev->bus_list.prev = NULL; up_write(&pci_bus_sem); pci_free_resources(dev); pci_dev_put(dev); } /** * pci_remove_device_safe - remove an unused hotplug device * @dev: the device to remove * * Delete the device structure from the device lists and * notify userspace (/sbin/hotplug), but only if the device * in question is not being used by a driver. * Returns 0 on success. */ #if 0 int pci_remove_device_safe(struct pci_dev *dev) { if (pci_dev_driver(dev)) return -EBUSY; pci_destroy_dev(dev); return 0; } #endif /* 0 */ void pci_remove_bus(struct pci_bus *pci_bus) { pci_proc_detach_bus(pci_bus); down_write(&pci_bus_sem); list_del(&pci_bus->node); up_write(&pci_bus_sem); if (!pci_bus->is_added) return; pci_remove_legacy_files(pci_bus); device_unregister(&pci_bus->dev); } EXPORT_SYMBOL(pci_remove_bus); static void __pci_remove_behind_bridge(struct pci_dev *dev); /** * pci_stop_and_remove_bus_device - remove a PCI device and any children * @dev: the device to remove * * Remove a PCI device from the device lists, informing the drivers * that the device has been removed. We also remove any subordinate * buses and children in a depth-first manner. 
* * For each device we remove, delete the device structure from the * device lists, remove the /proc entry, and notify userspace * (/sbin/hotplug). */ void __pci_remove_bus_device(struct pci_dev *dev) { if (dev->subordinate) { struct pci_bus *b = dev->subordinate; __pci_remove_behind_bridge(dev); pci_remove_bus(b); dev->subordinate = NULL; } pci_destroy_dev(dev); } EXPORT_SYMBOL(__pci_remove_bus_device); void pci_stop_and_remove_bus_device(struct pci_dev *dev) { pci_stop_bus_device(dev); __pci_remove_bus_device(dev); } static void __pci_remove_behind_bridge(struct pci_dev *dev) { struct list_head *l, *n; if (dev->subordinate) list_for_each_safe(l, n, &dev->subordinate->devices) __pci_remove_bus_device(pci_dev_b(l)); } static void pci_stop_behind_bridge(struct pci_dev *dev) { struct list_head *l, *n; if (dev->subordinate) list_for_each_safe(l, n, &dev->subordinate->devices) pci_stop_bus_device(pci_dev_b(l)); } /** * pci_stop_and_remove_behind_bridge - stop and remove all devices behind * a PCI bridge * @dev: PCI bridge device * * Remove all devices on the bus, except for the parent bridge. * This also removes any child buses, and any devices they may * contain in a depth-first manner. */ void pci_stop_and_remove_behind_bridge(struct pci_dev *dev) { pci_stop_behind_bridge(dev); __pci_remove_behind_bridge(dev); } static void pci_stop_bus_devices(struct pci_bus *bus) { struct list_head *l, *n; /* * VFs could be removed by pci_stop_and_remove_bus_device() in the * pci_stop_bus_devices() code path for PF. * aka, bus->devices get updated in the process. * but VFs are inserted after PFs when SRIOV is enabled for PF, * We can iterate the list backwards to get prev valid PF instead * of removed VF. 
*/ list_for_each_prev_safe(l, n, &bus->devices) { struct pci_dev *dev = pci_dev_b(l); pci_stop_bus_device(dev); } } /** * pci_stop_bus_device - stop a PCI device and any children * @dev: the device to stop * * Stop a PCI device (detach the driver, remove from the global list * and so on). This also stop any subordinate buses and children in a * depth-first manner. */ void pci_stop_bus_device(struct pci_dev *dev) { if (dev->subordinate) pci_stop_bus_devices(dev->subordinate); pci_stop_dev(dev); } EXPORT_SYMBOL(pci_stop_and_remove_bus_device); EXPORT_SYMBOL(pci_stop_and_remove_behind_bridge); EXPORT_SYMBOL_GPL(pci_stop_bus_device);
gpl-2.0
JerryScript/VaeVictus
arch/mips/kernel/i8253.c
2286
3247
/* * i8253.c 8253/PIT functions * */ #include <linux/clockchips.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <asm/delay.h> #include <asm/i8253.h> #include <asm/io.h> #include <asm/time.h> DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); /* * Initialize the PIT timer. * * This is also called after resume to bring the PIT into operation again. */ static void init_pit_timer(enum clock_event_mode mode, struct clock_event_device *evt) { raw_spin_lock(&i8253_lock); switch(mode) { case CLOCK_EVT_MODE_PERIODIC: /* binary, mode 2, LSB/MSB, ch 0 */ outb_p(0x34, PIT_MODE); outb_p(LATCH & 0xff , PIT_CH0); /* LSB */ outb(LATCH >> 8 , PIT_CH0); /* MSB */ break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: if (evt->mode == CLOCK_EVT_MODE_PERIODIC || evt->mode == CLOCK_EVT_MODE_ONESHOT) { outb_p(0x30, PIT_MODE); outb_p(0, PIT_CH0); outb_p(0, PIT_CH0); } break; case CLOCK_EVT_MODE_ONESHOT: /* One shot setup */ outb_p(0x38, PIT_MODE); break; case CLOCK_EVT_MODE_RESUME: /* Nothing to do here */ break; } raw_spin_unlock(&i8253_lock); } /* * Program the next event in oneshot mode * * Delta is given in PIT ticks */ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) { raw_spin_lock(&i8253_lock); outb_p(delta & 0xff , PIT_CH0); /* LSB */ outb(delta >> 8 , PIT_CH0); /* MSB */ raw_spin_unlock(&i8253_lock); return 0; } /* * On UP the PIT can serve all of the possible timer functions. On SMP systems * it can be solely used for the global tick. * * The profiling and update capabilites are switched off once the local apic is * registered. 
This mechanism replaces the previous #ifdef LOCAL_APIC - * !using_apic_timer decisions in do_timer_interrupt_hook() */ static struct clock_event_device pit_clockevent = { .name = "pit", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = init_pit_timer, .set_next_event = pit_next_event, .irq = 0, }; static irqreturn_t timer_interrupt(int irq, void *dev_id) { pit_clockevent.event_handler(&pit_clockevent); return IRQ_HANDLED; } static struct irqaction irq0 = { .handler = timer_interrupt, .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER, .name = "timer" }; /* * Initialize the conversion factor and the min/max deltas of the clock event * structure and register the clock event source with the framework. */ void __init setup_pit_timer(void) { struct clock_event_device *cd = &pit_clockevent; unsigned int cpu = smp_processor_id(); /* * Start pit with the boot cpu mask and make it global after the * IO_APIC has been initialized. */ cd->cpumask = cpumask_of(cpu); clockevent_set_clock(cd, CLOCK_TICK_RATE); cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); cd->min_delta_ns = clockevent_delta2ns(0xF, cd); clockevents_register_device(cd); setup_irq(0, &irq0); } static int __init init_pit_clocksource(void) { if (num_possible_cpus() > 1) /* PIT does not scale! */ return 0; return clocksource_i8253_init(); } arch_initcall(init_pit_clocksource);
gpl-2.0
Tommy-Geenexus/android_kernel_sony_apq8064_yuga_5.x
fs/ext4/resize.c
2542
50395
/* * linux/fs/ext4/resize.c * * Support for resizing an ext4 filesystem while it is mounted. * * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com> * * This could probably be made into a module, because it is not often in use. */ #define EXT4FS_DEBUG #include <linux/errno.h> #include <linux/slab.h> #include "ext4_jbd2.h" int ext4_resize_begin(struct super_block *sb) { int ret = 0; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; /* * We are not allowed to do online-resizing on a filesystem mounted * with error, because it can destroy the filesystem easily. */ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { ext4_warning(sb, "There are errors in the filesystem, " "so online resizing is not allowed\n"); return -EPERM; } if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags)) ret = -EBUSY; return ret; } void ext4_resize_end(struct super_block *sb) { clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags); smp_mb__after_clear_bit(); } #define outside(b, first, last) ((b) < (first) || (b) >= (last)) #define inside(b, first, last) ((b) >= (first) && (b) < (last)) static int verify_group_input(struct super_block *sb, struct ext4_new_group_data *input) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t start = ext4_blocks_count(es); ext4_fsblk_t end = start + input->blocks_count; ext4_group_t group = input->group; ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; unsigned overhead = ext4_bg_has_super(sb, group) ? (1 + ext4_bg_num_gdb(sb, group) + le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; ext4_fsblk_t metaend = start + overhead; struct buffer_head *bh = NULL; ext4_grpblk_t free_blocks_count, offset; int err = -EINVAL; input->free_blocks_count = free_blocks_count = input->blocks_count - 2 - overhead - sbi->s_itb_per_group; if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " "(%d free, %u reserved)\n", ext4_bg_has_super(sb, input->group) ? 
"normal" : "no-super", input->group, input->blocks_count, free_blocks_count, input->reserved_blocks); ext4_get_group_no_and_offset(sb, start, NULL, &offset); if (group != sbi->s_groups_count) ext4_warning(sb, "Cannot add at group %u (only %u groups)", input->group, sbi->s_groups_count); else if (offset != 0) ext4_warning(sb, "Last group not full"); else if (input->reserved_blocks > input->blocks_count / 5) ext4_warning(sb, "Reserved blocks too high (%u)", input->reserved_blocks); else if (free_blocks_count < 0) ext4_warning(sb, "Bad blocks count %u", input->blocks_count); else if (!(bh = sb_bread(sb, end - 1))) ext4_warning(sb, "Cannot read last block (%llu)", end - 1); else if (outside(input->block_bitmap, start, end)) ext4_warning(sb, "Block bitmap not in group (block %llu)", (unsigned long long)input->block_bitmap); else if (outside(input->inode_bitmap, start, end)) ext4_warning(sb, "Inode bitmap not in group (block %llu)", (unsigned long long)input->inode_bitmap); else if (outside(input->inode_table, start, end) || outside(itend - 1, start, end)) ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", (unsigned long long)input->inode_table, itend - 1); else if (input->inode_bitmap == input->block_bitmap) ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)", (unsigned long long)input->block_bitmap); else if (inside(input->block_bitmap, input->inode_table, itend)) ext4_warning(sb, "Block bitmap (%llu) in inode table " "(%llu-%llu)", (unsigned long long)input->block_bitmap, (unsigned long long)input->inode_table, itend - 1); else if (inside(input->inode_bitmap, input->inode_table, itend)) ext4_warning(sb, "Inode bitmap (%llu) in inode table " "(%llu-%llu)", (unsigned long long)input->inode_bitmap, (unsigned long long)input->inode_table, itend - 1); else if (inside(input->block_bitmap, start, metaend)) ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)", (unsigned long long)input->block_bitmap, start, metaend - 1); else if 
(inside(input->inode_bitmap, start, metaend)) ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)", (unsigned long long)input->inode_bitmap, start, metaend - 1); else if (inside(input->inode_table, start, metaend) || inside(itend - 1, start, metaend)) ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table " "(%llu-%llu)", (unsigned long long)input->inode_table, itend - 1, start, metaend - 1); else err = 0; brelse(bh); return err; } /* * ext4_new_flex_group_data is used by 64bit-resize interface to add a flex * group each time. */ struct ext4_new_flex_group_data { struct ext4_new_group_data *groups; /* new_group_data for groups in the flex group */ __u16 *bg_flags; /* block group flags of groups in @groups */ ext4_group_t count; /* number of groups in @groups */ }; /* * alloc_flex_gd() allocates a ext4_new_flex_group_data with size of * @flexbg_size. * * Returns NULL on failure otherwise address of the allocated structure. */ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) { struct ext4_new_flex_group_data *flex_gd; flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS); if (flex_gd == NULL) goto out3; flex_gd->count = flexbg_size; flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) * flexbg_size, GFP_NOFS); if (flex_gd->groups == NULL) goto out2; flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS); if (flex_gd->bg_flags == NULL) goto out1; return flex_gd; out1: kfree(flex_gd->groups); out2: kfree(flex_gd); out3: return NULL; } static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd) { kfree(flex_gd->bg_flags); kfree(flex_gd->groups); kfree(flex_gd); } /* * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps * and inode tables for a flex group. * * This function is used by 64bit-resize. Note that this function allocates * group tables from the 1st group of groups contained by @flexgd, which may * be a partial of a flex group. 
* * @sb: super block of fs to which the groups belongs */ static void ext4_alloc_group_tables(struct super_block *sb, struct ext4_new_flex_group_data *flex_gd, int flexbg_size) { struct ext4_new_group_data *group_data = flex_gd->groups; struct ext4_super_block *es = EXT4_SB(sb)->s_es; ext4_fsblk_t start_blk; ext4_fsblk_t last_blk; ext4_group_t src_group; ext4_group_t bb_index = 0; ext4_group_t ib_index = 0; ext4_group_t it_index = 0; ext4_group_t group; ext4_group_t last_group; unsigned overhead; BUG_ON(flex_gd->count == 0 || group_data == NULL); src_group = group_data[0].group; last_group = src_group + flex_gd->count - 1; BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) != (last_group & ~(flexbg_size - 1)))); next_group: group = group_data[0].group; start_blk = ext4_group_first_block_no(sb, src_group); last_blk = start_blk + group_data[src_group - group].blocks_count; overhead = ext4_bg_has_super(sb, src_group) ? (1 + ext4_bg_num_gdb(sb, src_group) + le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; start_blk += overhead; BUG_ON(src_group >= group_data[0].group + flex_gd->count); /* We collect contiguous blocks as much as possible. 
*/ src_group++; for (; src_group <= last_group; src_group++) if (!ext4_bg_has_super(sb, src_group)) last_blk += group_data[src_group - group].blocks_count; else break; /* Allocate block bitmaps */ for (; bb_index < flex_gd->count; bb_index++) { if (start_blk >= last_blk) goto next_group; group_data[bb_index].block_bitmap = start_blk++; ext4_get_group_no_and_offset(sb, start_blk - 1, &group, NULL); group -= group_data[0].group; group_data[group].free_blocks_count--; if (flexbg_size > 1) flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; } /* Allocate inode bitmaps */ for (; ib_index < flex_gd->count; ib_index++) { if (start_blk >= last_blk) goto next_group; group_data[ib_index].inode_bitmap = start_blk++; ext4_get_group_no_and_offset(sb, start_blk - 1, &group, NULL); group -= group_data[0].group; group_data[group].free_blocks_count--; if (flexbg_size > 1) flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; } /* Allocate inode tables */ for (; it_index < flex_gd->count; it_index++) { if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk) goto next_group; group_data[it_index].inode_table = start_blk; ext4_get_group_no_and_offset(sb, start_blk, &group, NULL); group -= group_data[0].group; group_data[group].free_blocks_count -= EXT4_SB(sb)->s_itb_per_group; if (flexbg_size > 1) flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; start_blk += EXT4_SB(sb)->s_itb_per_group; } if (test_opt(sb, DEBUG)) { int i; group = group_data[0].group; printk(KERN_DEBUG "EXT4-fs: adding a flex group with " "%d groups, flexbg size is %d:\n", flex_gd->count, flexbg_size); for (i = 0; i < flex_gd->count; i++) { printk(KERN_DEBUG "adding %s group %u: %u " "blocks (%d free)\n", ext4_bg_has_super(sb, group + i) ? 
"normal" : "no-super", group + i, group_data[i].blocks_count, group_data[i].free_blocks_count); } } } static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, ext4_fsblk_t blk) { struct buffer_head *bh; int err; bh = sb_getblk(sb, blk); if (!bh) return ERR_PTR(-EIO); if ((err = ext4_journal_get_write_access(handle, bh))) { brelse(bh); bh = ERR_PTR(err); } else { memset(bh->b_data, 0, sb->s_blocksize); set_buffer_uptodate(bh); } return bh; } /* * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA. * If that fails, restart the transaction & regain write access for the * buffer head which is used for block_bitmap modifications. */ static int extend_or_restart_transaction(handle_t *handle, int thresh) { int err; if (ext4_handle_has_enough_credits(handle, thresh)) return 0; err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA); if (err < 0) return err; if (err) { err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA); if (err) return err; } return 0; } /* * set_flexbg_block_bitmap() mark @count blocks starting from @block used. * * Helper function for ext4_setup_new_group_blocks() which set . 
* * @sb: super block * @handle: journal handle * @flex_gd: flex group data */ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle, struct ext4_new_flex_group_data *flex_gd, ext4_fsblk_t block, ext4_group_t count) { ext4_group_t count2; ext4_debug("mark blocks [%llu/%u] used\n", block, count); for (count2 = count; count > 0; count -= count2, block += count2) { ext4_fsblk_t start; struct buffer_head *bh; ext4_group_t group; int err; ext4_get_group_no_and_offset(sb, block, &group, NULL); start = ext4_group_first_block_no(sb, group); group -= flex_gd->groups[0].group; count2 = sb->s_blocksize * 8 - (block - start); if (count2 > count) count2 = count; if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) { BUG_ON(flex_gd->count > 1); continue; } err = extend_or_restart_transaction(handle, 1); if (err) return err; bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); if (!bh) return -EIO; err = ext4_journal_get_write_access(handle, bh); if (err) return err; ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block, block - start, count2); ext4_set_bits(bh->b_data, block - start, count2); err = ext4_handle_dirty_metadata(handle, NULL, bh); if (unlikely(err)) return err; brelse(bh); } return 0; } /* * Set up the block and inode bitmaps, and the inode table for the new groups. * This doesn't need to be part of the main transaction, since we are only * changing blocks outside the actual filesystem. We still do journaling to * ensure the recovery is correct in case of a failure just after resize. * If any part of this fails, we simply abort the resize. * * setup_new_flex_group_blocks handles a flex group as follow: * 1. copy super block and GDT, and initialize group tables if necessary. * In this step, we only set bits in blocks bitmaps for blocks taken by * super block and GDT. * 2. allocate group tables in block bitmaps, that is, set bits in block * bitmap for blocks taken by group tables. 
*/ static int setup_new_flex_group_blocks(struct super_block *sb, struct ext4_new_flex_group_data *flex_gd) { int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group}; ext4_fsblk_t start; ext4_fsblk_t block; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; struct ext4_new_group_data *group_data = flex_gd->groups; __u16 *bg_flags = flex_gd->bg_flags; handle_t *handle; ext4_group_t group, count; struct buffer_head *bh = NULL; int reserved_gdb, i, j, err = 0, err2; BUG_ON(!flex_gd->count || !group_data || group_data[0].group != sbi->s_groups_count); reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); /* This transaction may be extended/restarted along the way */ handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA); if (IS_ERR(handle)) return PTR_ERR(handle); group = group_data[0].group; for (i = 0; i < flex_gd->count; i++, group++) { unsigned long gdblocks; gdblocks = ext4_bg_num_gdb(sb, group); start = ext4_group_first_block_no(sb, group); /* Copy all of the GDT blocks into the backup in this group */ for (j = 0, block = start + 1; j < gdblocks; j++, block++) { struct buffer_head *gdb; ext4_debug("update backup group %#04llx\n", block); err = extend_or_restart_transaction(handle, 1); if (err) goto out; gdb = sb_getblk(sb, block); if (!gdb) { err = -EIO; goto out; } err = ext4_journal_get_write_access(handle, gdb); if (err) { brelse(gdb); goto out; } memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data, gdb->b_size); set_buffer_uptodate(gdb); err = ext4_handle_dirty_metadata(handle, NULL, gdb); if (unlikely(err)) { brelse(gdb); goto out; } brelse(gdb); } /* Zero out all of the reserved backup group descriptor * table blocks */ if (ext4_bg_has_super(sb, group)) { err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb, GFP_NOFS); if (err) goto out; } /* Initialize group tables of the grop @group */ if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED)) goto handle_bb; /* Zero out all of the inode table blocks */ block = 
group_data[i].inode_table; ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", block, sbi->s_itb_per_group); err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS); if (err) goto out; handle_bb: if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT) goto handle_ib; /* Initialize block bitmap of the @group */ block = group_data[i].block_bitmap; err = extend_or_restart_transaction(handle, 1); if (err) goto out; bh = bclean(handle, sb, block); if (IS_ERR(bh)) { err = PTR_ERR(bh); goto out; } if (ext4_bg_has_super(sb, group)) { ext4_debug("mark backup superblock %#04llx (+0)\n", start); ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb + 1); } ext4_mark_bitmap_end(group_data[i].blocks_count, sb->s_blocksize * 8, bh->b_data); err = ext4_handle_dirty_metadata(handle, NULL, bh); if (err) goto out; brelse(bh); handle_ib: if (bg_flags[i] & EXT4_BG_INODE_UNINIT) continue; /* Initialize inode bitmap of the @group */ block = group_data[i].inode_bitmap; err = extend_or_restart_transaction(handle, 1); if (err) goto out; /* Mark unused entries in inode bitmap used */ bh = bclean(handle, sb, block); if (IS_ERR(bh)) { err = PTR_ERR(bh); goto out; } ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); err = ext4_handle_dirty_metadata(handle, NULL, bh); if (err) goto out; brelse(bh); } bh = NULL; /* Mark group tables in block bitmap */ for (j = 0; j < GROUP_TABLE_COUNT; j++) { count = group_table_count[j]; start = (&group_data[0].block_bitmap)[j]; block = start; for (i = 1; i < flex_gd->count; i++) { block += group_table_count[j]; if (block == (&group_data[i].block_bitmap)[j]) { count += group_table_count[j]; continue; } err = set_flexbg_block_bitmap(sb, handle, flex_gd, start, count); if (err) goto out; count = group_table_count[j]; start = group_data[i].block_bitmap; block = start; } if (count) { err = set_flexbg_block_bitmap(sb, handle, flex_gd, start, count); if (err) goto out; } } out: brelse(bh); err2 = ext4_journal_stop(handle); if (err2 && 
!err) err = err2; return err; } /* * Iterate through the groups which hold BACKUP superblock/GDT copies in an * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before * calling this for the first time. In a sparse filesystem it will be the * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... */ static unsigned ext4_list_backups(struct super_block *sb, unsigned *three, unsigned *five, unsigned *seven) { unsigned *min = three; int mult = 3; unsigned ret; if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { ret = *min; *min += 1; return ret; } if (*five < *min) { min = five; mult = 5; } if (*seven < *min) { min = seven; mult = 7; } ret = *min; *min *= mult; return ret; } /* * Check that all of the backup GDT blocks are held in the primary GDT block. * It is assumed that they are stored in group order. Returns the number of * groups in current filesystem that have BACKUPS, or -ve error code. */ static int verify_reserved_gdb(struct super_block *sb, ext4_group_t end, struct buffer_head *primary) { const ext4_fsblk_t blk = primary->b_blocknr; unsigned three = 1; unsigned five = 5; unsigned seven = 7; unsigned grp; __le32 *p = (__le32 *)primary->b_data; int gdbackups = 0; while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { if (le32_to_cpu(*p++) != grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ ext4_warning(sb, "reserved GDT %llu" " missing grp %d (%llu)", blk, grp, grp * (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + blk); return -EINVAL; } if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb)) return -EFBIG; } return gdbackups; } /* * Called when we need to bring a reserved group descriptor table block into * use from the resize inode. The primary copy of the new GDT block currently * is an indirect block (under the double indirect block in the resize inode). 
* The new backup GDT blocks will be stored as leaf blocks in this indirect * block, in group order. Even though we know all the block numbers we need, * we check to ensure that the resize inode has actually reserved these blocks. * * Don't need to update the block bitmaps because the blocks are still in use. * * We get all of the error cases out of the way, so that we are sure to not * fail once we start modifying the data on disk, because JBD has no rollback. */ static int add_new_gdb(handle_t *handle, struct inode *inode, ext4_group_t group) { struct super_block *sb = inode->i_sb; struct ext4_super_block *es = EXT4_SB(sb)->s_es; unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; struct buffer_head **o_group_desc, **n_group_desc; struct buffer_head *dind; struct buffer_head *gdb_bh; int gdbackups; struct ext4_iloc iloc; __le32 *data; int err; if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", gdb_num); /* * If we are not using the primary superblock/GDT copy don't resize, * because the user tools have no way of handling this. Probably a * bad time to do it anyways. 
*/ if (EXT4_SB(sb)->s_sbh->b_blocknr != le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { ext4_warning(sb, "won't resize using backup superblock at %llu", (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); return -EPERM; } gdb_bh = sb_bread(sb, gdblock); if (!gdb_bh) return -EIO; gdbackups = verify_reserved_gdb(sb, group, gdb_bh); if (gdbackups < 0) { err = gdbackups; goto exit_bh; } data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; dind = sb_bread(sb, le32_to_cpu(*data)); if (!dind) { err = -EIO; goto exit_bh; } data = (__le32 *)dind->b_data; if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { ext4_warning(sb, "new group %u GDT block %llu not reserved", group, gdblock); err = -EINVAL; goto exit_dind; } err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (unlikely(err)) goto exit_dind; err = ext4_journal_get_write_access(handle, gdb_bh); if (unlikely(err)) goto exit_sbh; err = ext4_journal_get_write_access(handle, dind); if (unlikely(err)) ext4_std_error(sb, err); /* ext4_reserve_inode_write() gets a reference on the iloc */ err = ext4_reserve_inode_write(handle, inode, &iloc); if (unlikely(err)) goto exit_dindj; n_group_desc = ext4_kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *), GFP_NOFS); if (!n_group_desc) { err = -ENOMEM; ext4_warning(sb, "not enough memory for %lu groups", gdb_num + 1); goto exit_inode; } /* * Finally, we have all of the possible failures behind us... * * Remove new GDT block from inode double-indirect block and clear out * the new GDT block for use (which also "frees" the backup GDT blocks * from the reserved inode). We don't need to change the bitmaps for * these blocks, because they are marked as in-use from being in the * reserved inode, and will become GDT blocks (primary and backup). 
*/ data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; err = ext4_handle_dirty_metadata(handle, NULL, dind); if (unlikely(err)) { ext4_std_error(sb, err); goto exit_inode; } inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; ext4_mark_iloc_dirty(handle, inode, &iloc); memset(gdb_bh->b_data, 0, sb->s_blocksize); err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); if (unlikely(err)) { ext4_std_error(sb, err); goto exit_inode; } brelse(dind); o_group_desc = EXT4_SB(sb)->s_group_desc; memcpy(n_group_desc, o_group_desc, EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); n_group_desc[gdb_num] = gdb_bh; EXT4_SB(sb)->s_group_desc = n_group_desc; EXT4_SB(sb)->s_gdb_count++; ext4_kvfree(o_group_desc); le16_add_cpu(&es->s_reserved_gdt_blocks, -1); err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); if (err) ext4_std_error(sb, err); return err; exit_inode: ext4_kvfree(n_group_desc); /* ext4_handle_release_buffer(handle, iloc.bh); */ brelse(iloc.bh); exit_dindj: /* ext4_handle_release_buffer(handle, dind); */ exit_sbh: /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */ exit_dind: brelse(dind); exit_bh: brelse(gdb_bh); ext4_debug("leaving with error %d\n", err); return err; } /* * Called when we are adding a new group which has a backup copy of each of * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. * We need to add these reserved backup GDT blocks to the resize inode, so * that they are kept for future resizing and not allocated to files. * * Each reserved backup GDT block will go into a different indirect block. * The indirect blocks are actually the primary reserved GDT blocks, * so we know in advance what their block numbers are. We only get the * double-indirect block to verify it is pointing to the primary reserved * GDT blocks so we don't overwrite a data block by accident. The reserved * backup GDT blocks are stored in their reserved primary GDT block. 
*/ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, ext4_group_t group) { struct super_block *sb = inode->i_sb; int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); struct buffer_head **primary; struct buffer_head *dind; struct ext4_iloc iloc; ext4_fsblk_t blk; __le32 *data, *end; int gdbackups = 0; int res, i; int err; primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS); if (!primary) return -ENOMEM; data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; dind = sb_bread(sb, le32_to_cpu(*data)); if (!dind) { err = -EIO; goto exit_free; } blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count % EXT4_ADDR_PER_BLOCK(sb)); end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb); /* Get each reserved primary GDT block and verify it holds backups */ for (res = 0; res < reserved_gdb; res++, blk++) { if (le32_to_cpu(*data) != blk) { ext4_warning(sb, "reserved block %llu" " not at offset %ld", blk, (long)(data - (__le32 *)dind->b_data)); err = -EINVAL; goto exit_bh; } primary[res] = sb_bread(sb, blk); if (!primary[res]) { err = -EIO; goto exit_bh; } gdbackups = verify_reserved_gdb(sb, group, primary[res]); if (gdbackups < 0) { brelse(primary[res]); err = gdbackups; goto exit_bh; } if (++data >= end) data = (__le32 *)dind->b_data; } for (i = 0; i < reserved_gdb; i++) { if ((err = ext4_journal_get_write_access(handle, primary[i]))) { /* int j; for (j = 0; j < i; j++) ext4_handle_release_buffer(handle, primary[j]); */ goto exit_bh; } } if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) goto exit_bh; /* * Finally we can add each of the reserved backup GDT blocks from * the new group to its reserved primary GDT block. 
*/ blk = group * EXT4_BLOCKS_PER_GROUP(sb); for (i = 0; i < reserved_gdb; i++) { int err2; data = (__le32 *)primary[i]->b_data; /* printk("reserving backup %lu[%u] = %lu\n", primary[i]->b_blocknr, gdbackups, blk + primary[i]->b_blocknr); */ data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]); if (!err) err = err2; } inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9; ext4_mark_iloc_dirty(handle, inode, &iloc); exit_bh: while (--res >= 0) brelse(primary[res]); brelse(dind); exit_free: kfree(primary); return err; } /* * Update the backup copies of the ext4 metadata. These don't need to be part * of the main resize transaction, because e2fsck will re-write them if there * is a problem (basically only OOM will cause a problem). However, we * _should_ update the backups if possible, in case the primary gets trashed * for some reason and we need to run e2fsck from a backup superblock. The * important part is that the new block and inode counts are in the backup * superblocks, and the location of the new group metadata in the GDT backups. * * We do not need take the s_resize_lock for this, because these * blocks are not otherwise touched by the filesystem code when it is * mounted. We don't need to worry about last changing from * sbi->s_groups_count, because the worst that can happen is that we * do not copy the full number of backups at this time. The resize * which changed s_groups_count will backup again. 
*/ static void update_backups(struct super_block *sb, int blk_off, char *data, int size) { struct ext4_sb_info *sbi = EXT4_SB(sb); const ext4_group_t last = sbi->s_groups_count; const int bpg = EXT4_BLOCKS_PER_GROUP(sb); unsigned three = 1; unsigned five = 5; unsigned seven = 7; ext4_group_t group; int rest = sb->s_blocksize - size; handle_t *handle; int err = 0, err2; handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA); if (IS_ERR(handle)) { group = 1; err = PTR_ERR(handle); goto exit_err; } while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) { struct buffer_head *bh; /* Out of journal space, and can't get more - abort - so sad */ if (ext4_handle_valid(handle) && handle->h_buffer_credits == 0 && ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) && (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA))) break; bh = sb_getblk(sb, group * bpg + blk_off); if (!bh) { err = -EIO; break; } ext4_debug("update metadata backup %#04lx\n", (unsigned long)bh->b_blocknr); if ((err = ext4_journal_get_write_access(handle, bh))) break; lock_buffer(bh); memcpy(bh->b_data, data, size); if (rest) memset(bh->b_data + size, 0, rest); set_buffer_uptodate(bh); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, NULL, bh); if (unlikely(err)) ext4_std_error(sb, err); brelse(bh); } if ((err2 = ext4_journal_stop(handle)) && !err) err = err2; /* * Ugh! Need to have e2fsck write the backup copies. It is too * late to revert the resize, we shouldn't fail just because of * the backup copies (they are only needed in case of corruption). * * However, if we got here we have a journal problem too, so we * can't really start a transaction to mark the superblock. * Chicken out and just set the flag on the hope it will be written * to disk, and if not - we will simply wait until next fsck. 
*/ exit_err: if (err) { ext4_warning(sb, "can't update backup for group %u (err %d), " "forcing fsck on next reboot", group, err); sbi->s_mount_state &= ~EXT4_VALID_FS; sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); mark_buffer_dirty(sbi->s_sbh); } } /* * ext4_add_new_descs() adds @count group descriptor of groups * starting at @group * * @handle: journal handle * @sb: super block * @group: the group no. of the first group desc to be added * @resize_inode: the resize inode * @count: number of group descriptors to be added */ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, ext4_group_t group, struct inode *resize_inode, ext4_group_t count) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; struct buffer_head *gdb_bh; int i, gdb_off, gdb_num, err = 0; for (i = 0; i < count; i++, group++) { int reserved_gdb = ext4_bg_has_super(sb, group) ? le16_to_cpu(es->s_reserved_gdt_blocks) : 0; gdb_off = group % EXT4_DESC_PER_BLOCK(sb); gdb_num = group / EXT4_DESC_PER_BLOCK(sb); /* * We will only either add reserved group blocks to a backup group * or remove reserved blocks for the first group in a new group block. * Doing both would be mean more complex code, and sane people don't * use non-sparse filesystems anymore. This is already checked above. 
*/ if (gdb_off) { gdb_bh = sbi->s_group_desc[gdb_num]; err = ext4_journal_get_write_access(handle, gdb_bh); if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group)) err = reserve_backup_gdb(handle, resize_inode, group); } else err = add_new_gdb(handle, resize_inode, group); if (err) break; } return err; } /* * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg */ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, struct ext4_new_flex_group_data *flex_gd) { struct ext4_new_group_data *group_data = flex_gd->groups; struct ext4_group_desc *gdp; struct ext4_sb_info *sbi = EXT4_SB(sb); struct buffer_head *gdb_bh; ext4_group_t group; __u16 *bg_flags = flex_gd->bg_flags; int i, gdb_off, gdb_num, err = 0; for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) { group = group_data->group; gdb_off = group % EXT4_DESC_PER_BLOCK(sb); gdb_num = group / EXT4_DESC_PER_BLOCK(sb); /* * get_write_access() has been called on gdb_bh by ext4_add_new_desc(). 
*/ gdb_bh = sbi->s_group_desc[gdb_num]; /* Update group descriptor block for new group */ gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data + gdb_off * EXT4_DESC_SIZE(sb)); memset(gdp, 0, EXT4_DESC_SIZE(sb)); ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); ext4_inode_table_set(sb, gdp, group_data->inode_table); ext4_free_group_clusters_set(sb, gdp, EXT4_B2C(sbi, group_data->free_blocks_count)); ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); gdp->bg_flags = cpu_to_le16(*bg_flags); gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); if (unlikely(err)) { ext4_std_error(sb, err); break; } /* * We can allocate memory for mb_alloc based on the new group * descriptor */ err = ext4_mb_add_groupinfo(sb, group, gdp); if (err) break; } return err; } /* * ext4_update_super() updates the super block so that the newly added * groups can be seen by the filesystem. * * @sb: super block * @flex_gd: new added groups */ static void ext4_update_super(struct super_block *sb, struct ext4_new_flex_group_data *flex_gd) { ext4_fsblk_t blocks_count = 0; ext4_fsblk_t free_blocks = 0; ext4_fsblk_t reserved_blocks = 0; struct ext4_new_group_data *group_data = flex_gd->groups; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int i; BUG_ON(flex_gd->count == 0 || group_data == NULL); /* * Make the new blocks and inodes valid next. We do this before * increasing the group count so that once the group is enabled, * all of its blocks and inodes are already valid. * * We always allocate group-by-group, then block-by-block or * inode-by-inode within a group, so enabling these * blocks/inodes before the group is live won't actually let us * allocate the new space yet. 
*/ for (i = 0; i < flex_gd->count; i++) { blocks_count += group_data[i].blocks_count; free_blocks += group_data[i].free_blocks_count; } reserved_blocks = ext4_r_blocks_count(es) * 100; do_div(reserved_blocks, ext4_blocks_count(es)); reserved_blocks *= blocks_count; do_div(reserved_blocks, 100); ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count); ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks); le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) * flex_gd->count); le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) * flex_gd->count); /* * We need to protect s_groups_count against other CPUs seeing * inconsistent state in the superblock. * * The precise rules we use are: * * * Writers must perform a smp_wmb() after updating all * dependent data and before modifying the groups count * * * Readers must perform an smp_rmb() after reading the groups * count and before reading any dependent data. * * NB. These rules can be relaxed when checking the group count * while freeing data, as we can only allocate from a block * group after serialising against the group count, and we can * only then free after serialising in turn against that * allocation. */ smp_wmb(); /* Update the global fs size fields */ sbi->s_groups_count += flex_gd->count; /* Update the reserved block counts only once the new group is * active. 
*/ ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + reserved_blocks); /* Update the free space counts */ percpu_counter_add(&sbi->s_freeclusters_counter, EXT4_B2C(sbi, free_blocks)); percpu_counter_add(&sbi->s_freeinodes_counter, EXT4_INODES_PER_GROUP(sb) * flex_gd->count); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) && sbi->s_log_groups_per_flex) { ext4_group_t flex_group; flex_group = ext4_flex_group(sbi, group_data[0].group); atomic_add(EXT4_B2C(sbi, free_blocks), &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, &sbi->s_flex_groups[flex_group].free_inodes); } if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: added group %u:" "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, blocks_count, free_blocks, reserved_blocks); } /* Add a flex group to an fs. Ensure we handle all possible error conditions * _before_ we start modifying the filesystem, because we cannot abort the * transaction and not have it write the data to disk. */ static int ext4_flex_group_add(struct super_block *sb, struct inode *resize_inode, struct ext4_new_flex_group_data *flex_gd) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t o_blocks_count; ext4_grpblk_t last; ext4_group_t group; handle_t *handle; unsigned reserved_gdb; int err = 0, err2 = 0, credit; BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags); reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks); o_blocks_count = ext4_blocks_count(es); ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); BUG_ON(last); err = setup_new_flex_group_blocks(sb, flex_gd); if (err) goto exit; /* * We will always be modifying at least the superblock and GDT * block. If we are adding a group past the last current GDT block, * we will also modify the inode and the dindirect block. If we * are adding a group with superblock/GDT backups we will also * modify each of the reserved GDT dindirect blocks. 
*/ credit = flex_gd->count * 4 + reserved_gdb; handle = ext4_journal_start_sb(sb, credit); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto exit; } err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto exit_journal; group = flex_gd->groups[0].group; BUG_ON(group != EXT4_SB(sb)->s_groups_count); err = ext4_add_new_descs(handle, sb, group, resize_inode, flex_gd->count); if (err) goto exit_journal; err = ext4_setup_new_descs(handle, sb, flex_gd); if (err) goto exit_journal; ext4_update_super(sb, flex_gd); err = ext4_handle_dirty_super(handle, sb); exit_journal: err2 = ext4_journal_stop(handle); if (!err) err = err2; if (!err) { int i; update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, sizeof(struct ext4_super_block)); for (i = 0; i < flex_gd->count; i++, group++) { struct buffer_head *gdb_bh; int gdb_num; gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb); gdb_bh = sbi->s_group_desc[gdb_num]; update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data, gdb_bh->b_size); } } exit: return err; } static int ext4_setup_next_flex_gd(struct super_block *sb, struct ext4_new_flex_group_data *flex_gd, ext4_fsblk_t n_blocks_count, unsigned long flexbg_size) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct ext4_new_group_data *group_data = flex_gd->groups; ext4_fsblk_t o_blocks_count; ext4_group_t n_group; ext4_group_t group; ext4_group_t last_group; ext4_grpblk_t last; ext4_grpblk_t blocks_per_group; unsigned long i; blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb); o_blocks_count = ext4_blocks_count(es); if (o_blocks_count == n_blocks_count) return 0; ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); BUG_ON(last); ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last); last_group = group | (flexbg_size - 1); if (last_group > n_group) last_group = n_group; flex_gd->count = last_group - group + 1; for (i = 0; i < flex_gd->count; i++) { int overhead; group_data[i].group = group + i; group_data[i].blocks_count = blocks_per_group; overhead = 
ext4_bg_has_super(sb, group + i) ? (1 + ext4_bg_num_gdb(sb, group + i) + le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; group_data[i].free_blocks_count = blocks_per_group - overhead; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | EXT4_BG_INODE_UNINIT; else flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; } if (last_group == n_group && EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) /* We need to initialize block bitmap of last group. */ flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; if ((last_group == n_group) && (last != blocks_per_group - 1)) { group_data[i - 1].blocks_count = last + 1; group_data[i - 1].free_blocks_count -= blocks_per_group- last - 1; } return 1; } /* Add group descriptor data to an existing or new group descriptor block. * Ensure we handle all possible error conditions _before_ we start modifying * the filesystem, because we cannot abort the transaction and not have it * write the data to disk. * * If we are on a GDT block boundary, we need to get the reserved GDT block. * Otherwise, we may need to add backup GDT blocks for a sparse group. * * We only need to hold the superblock lock while we are actually adding * in the new group's counts to the superblock. Prior to that we have * not really "added" the group at all. We re-check that we are still * adding in the last group in case things have changed since verifying. */ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) { struct ext4_new_flex_group_data flex_gd; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int reserved_gdb = ext4_bg_has_super(sb, input->group) ? 
le16_to_cpu(es->s_reserved_gdt_blocks) : 0; struct inode *inode = NULL; int gdb_off, gdb_num; int err; __u16 bg_flags = 0; gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { ext4_warning(sb, "Can't resize non-sparse filesystem further"); return -EPERM; } if (ext4_blocks_count(es) + input->blocks_count < ext4_blocks_count(es)) { ext4_warning(sb, "blocks_count overflow"); return -EINVAL; } if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < le32_to_cpu(es->s_inodes_count)) { ext4_warning(sb, "inodes_count overflow"); return -EINVAL; } if (reserved_gdb || gdb_off == 0) { if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE) || !le16_to_cpu(es->s_reserved_gdt_blocks)) { ext4_warning(sb, "No reserved GDT blocks, can't resize"); return -EPERM; } inode = ext4_iget(sb, EXT4_RESIZE_INO); if (IS_ERR(inode)) { ext4_warning(sb, "Error opening resize inode"); return PTR_ERR(inode); } } err = verify_group_input(sb, input); if (err) goto out; flex_gd.count = 1; flex_gd.groups = input; flex_gd.bg_flags = &bg_flags; err = ext4_flex_group_add(sb, inode, &flex_gd); out: iput(inode); return err; } /* ext4_group_add */ /* * extend a group without checking assuming that checking has been done. */ static int ext4_group_extend_no_check(struct super_block *sb, ext4_fsblk_t o_blocks_count, ext4_grpblk_t add) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; handle_t *handle; int err = 0, err2; /* We will update the superblock, one block bitmap, and * one group descriptor via ext4_group_add_blocks(). 
*/ handle = ext4_journal_start_sb(sb, 3); if (IS_ERR(handle)) { err = PTR_ERR(handle); ext4_warning(sb, "error %d on journal start", err); return err; } err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) { ext4_warning(sb, "error %d on journal write access", err); goto errout; } ext4_blocks_count_set(es, o_blocks_count + add); ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add); ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, o_blocks_count + add); /* We add the blocks to the bitmap and set the group need init bit */ err = ext4_group_add_blocks(handle, sb, o_blocks_count, add); if (err) goto errout; ext4_handle_dirty_super(handle, sb); ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, o_blocks_count + add); errout: err2 = ext4_journal_stop(handle); if (err2 && !err) err = err2; if (!err) { if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: extended group to %llu " "blocks\n", ext4_blocks_count(es)); update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es, sizeof(struct ext4_super_block)); } return err; } /* * Extend the filesystem to the new number of blocks specified. This entry * point is only used to extend the current filesystem to the end of the last * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" * for emergencies (because it has no dependencies on reserved blocks). * * If we _really_ wanted, we could use default values to call ext4_group_add() * allow the "remount" trick to work for arbitrary resizing, assuming enough * GDT blocks are reserved to grow to the desired size. 
*/ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, ext4_fsblk_t n_blocks_count) { ext4_fsblk_t o_blocks_count; ext4_grpblk_t last; ext4_grpblk_t add; struct buffer_head *bh; int err; ext4_group_t group; o_blocks_count = ext4_blocks_count(es); if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "extending last group from %llu to %llu blocks", o_blocks_count, n_blocks_count); if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) return 0; if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { ext4_msg(sb, KERN_ERR, "filesystem too large to resize to %llu blocks safely", n_blocks_count); if (sizeof(sector_t) < 8) ext4_warning(sb, "CONFIG_LBDAF not enabled"); return -EINVAL; } if (n_blocks_count < o_blocks_count) { ext4_warning(sb, "can't shrink FS - resize aborted"); return -EINVAL; } /* Handle the remaining blocks in the last group only. */ ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); if (last == 0) { ext4_warning(sb, "need to use ext2online to resize further"); return -EPERM; } add = EXT4_BLOCKS_PER_GROUP(sb) - last; if (o_blocks_count + add < o_blocks_count) { ext4_warning(sb, "blocks_count overflow"); return -EINVAL; } if (o_blocks_count + add > n_blocks_count) add = n_blocks_count - o_blocks_count; if (o_blocks_count + add < n_blocks_count) ext4_warning(sb, "will only finish group (%llu blocks, %u new)", o_blocks_count + add, add); /* See if the device is actually as big as what was requested */ bh = sb_bread(sb, o_blocks_count + add - 1); if (!bh) { ext4_warning(sb, "can't read last block, resize aborted"); return -ENOSPC; } brelse(bh); err = ext4_group_extend_no_check(sb, o_blocks_count, add); return err; } /* ext4_group_extend */ /* * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count * * @sb: super block of the fs to be resized * @n_blocks_count: the number of blocks resides in the resized fs */ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) { 
struct ext4_new_flex_group_data *flex_gd = NULL; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; struct buffer_head *bh; struct inode *resize_inode; ext4_fsblk_t o_blocks_count; ext4_group_t o_group; ext4_group_t n_group; ext4_grpblk_t offset, add; unsigned long n_desc_blocks; unsigned long o_desc_blocks; unsigned long desc_blocks; int err = 0, flexbg_size = 1; o_blocks_count = ext4_blocks_count(es); if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "resizing filesystem from %llu " "to %llu blocks", o_blocks_count, n_blocks_count); if (n_blocks_count < o_blocks_count) { /* On-line shrinking not supported */ ext4_warning(sb, "can't shrink FS - resize aborted"); return -EINVAL; } if (n_blocks_count == o_blocks_count) /* Nothing need to do */ return 0; ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset); ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset); n_desc_blocks = (n_group + EXT4_DESC_PER_BLOCK(sb)) / EXT4_DESC_PER_BLOCK(sb); o_desc_blocks = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); desc_blocks = n_desc_blocks - o_desc_blocks; if (desc_blocks && (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE) || le16_to_cpu(es->s_reserved_gdt_blocks) < desc_blocks)) { ext4_warning(sb, "No reserved GDT blocks, can't resize"); return -EPERM; } resize_inode = ext4_iget(sb, EXT4_RESIZE_INO); if (IS_ERR(resize_inode)) { ext4_warning(sb, "Error opening resize inode"); return PTR_ERR(resize_inode); } /* See if the device is actually as big as what was requested */ bh = sb_bread(sb, n_blocks_count - 1); if (!bh) { ext4_warning(sb, "can't read last block, resize aborted"); return -ENOSPC; } brelse(bh); /* extend the last group */ if (n_group == o_group) add = n_blocks_count - o_blocks_count; else add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1); if (add > 0) { err = ext4_group_extend_no_check(sb, o_blocks_count, add); if (err) goto out; } if 
(EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) && es->s_log_groups_per_flex) flexbg_size = 1 << es->s_log_groups_per_flex; o_blocks_count = ext4_blocks_count(es); if (o_blocks_count == n_blocks_count) goto out; flex_gd = alloc_flex_gd(flexbg_size); if (flex_gd == NULL) { err = -ENOMEM; goto out; } /* Add flex groups. Note that a regular group is a * flex group with 1 group. */ while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count, flexbg_size)) { ext4_alloc_group_tables(sb, flex_gd, flexbg_size); err = ext4_flex_group_add(sb, resize_inode, flex_gd); if (unlikely(err)) break; } out: if (flex_gd) free_flex_gd(flex_gd); iput(resize_inode); if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "resized filesystem from %llu " "upto %llu blocks", o_blocks_count, n_blocks_count); return err; }
gpl-2.0
ztemt/A465_5.1_kernel
fs/qnx4/namei.c
2542
3190
/* * QNX4 file system, Linux implementation. * * Version : 0.2.1 * * Using parts of the xiafs filesystem. * * History : * * 01-06-1998 by Richard Frowijn : first release. * 21-06-1998 by Frank Denis : dcache support, fixed error codes. * 04-07-1998 by Frank Denis : first step for rmdir/unlink. */ #include <linux/buffer_head.h> #include "qnx4.h" /* * check if the filename is correct. For some obscure reason, qnx writes a * new file twice in the directory entry, first with all possible options at 0 * and for a second time the way it is, they want us not to access the qnx * filesystem when whe are using linux. */ static int qnx4_match(int len, const char *name, struct buffer_head *bh, unsigned long *offset) { struct qnx4_inode_entry *de; int namelen, thislen; if (bh == NULL) { printk(KERN_WARNING "qnx4: matching unassigned buffer !\n"); return 0; } de = (struct qnx4_inode_entry *) (bh->b_data + *offset); *offset += QNX4_DIR_ENTRY_SIZE; if ((de->di_status & QNX4_FILE_LINK) != 0) { namelen = QNX4_NAME_MAX; } else { namelen = QNX4_SHORT_NAME_MAX; } thislen = strlen( de->di_fname ); if ( thislen > namelen ) thislen = namelen; if (len != thislen) { return 0; } if (strncmp(name, de->di_fname, len) == 0) { if ((de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)) != 0) { return 1; } } return 0; } static struct buffer_head *qnx4_find_entry(int len, struct inode *dir, const char *name, struct qnx4_inode_entry **res_dir, int *ino) { unsigned long block, offset, blkofs; struct buffer_head *bh; *res_dir = NULL; if (!dir->i_sb) { printk(KERN_WARNING "qnx4: no superblock on dir.\n"); return NULL; } bh = NULL; block = offset = blkofs = 0; while (blkofs * QNX4_BLOCK_SIZE + offset < dir->i_size) { if (!bh) { block = qnx4_block_map(dir, blkofs); if (block) bh = sb_bread(dir->i_sb, block); if (!bh) { blkofs++; continue; } } *res_dir = (struct qnx4_inode_entry *) (bh->b_data + offset); if (qnx4_match(len, name, bh, &offset)) { *ino = block * QNX4_INODES_PER_BLOCK + (offset / 
QNX4_DIR_ENTRY_SIZE) - 1; return bh; } if (offset < bh->b_size) { continue; } brelse(bh); bh = NULL; offset = 0; blkofs++; } brelse(bh); *res_dir = NULL; return NULL; } struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { int ino; struct qnx4_inode_entry *de; struct qnx4_link_info *lnk; struct buffer_head *bh; const char *name = dentry->d_name.name; int len = dentry->d_name.len; struct inode *foundinode = NULL; if (!(bh = qnx4_find_entry(len, dir, name, &de, &ino))) goto out; /* The entry is linked, let's get the real info */ if ((de->di_status & QNX4_FILE_LINK) == QNX4_FILE_LINK) { lnk = (struct qnx4_link_info *) de; ino = (le32_to_cpu(lnk->dl_inode_blk) - 1) * QNX4_INODES_PER_BLOCK + lnk->dl_inode_ndx; } brelse(bh); foundinode = qnx4_iget(dir->i_sb, ino); if (IS_ERR(foundinode)) { QNX4DEBUG((KERN_ERR "qnx4: lookup->iget -> error %ld\n", PTR_ERR(foundinode))); return ERR_CAST(foundinode); } out: d_add(dentry, foundinode); return NULL; }
gpl-2.0
hakcenter/android_kernel_samsung_hlte
net/xfrm/xfrm_policy.c
2798
72800
/* * xfrm_policy.c * * Changes: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * Kazunori MIYAZAWA @USAGI * YOSHIFUJI Hideaki * Split up af-specific portion * Derek Atkins <derek@ihtfp.com> Add the post_input processor * */ #include <linux/err.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/cache.h> #include <linux/audit.h> #include <net/dst.h> #include <net/xfrm.h> #include <net/ip.h> #ifdef CONFIG_XFRM_STATISTICS #include <net/snmp.h> #endif #include "xfrm_hash.h" DEFINE_MUTEX(xfrm_cfg_mutex); EXPORT_SYMBOL(xfrm_cfg_mutex); static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); static struct dst_entry *xfrm_policy_sk_bundles; static DEFINE_RWLOCK(xfrm_policy_lock); static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; static struct kmem_cache *xfrm_dst_cache __read_mostly; static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); static void xfrm_init_pmtu(struct dst_entry *dst); static int stale_bundle(struct dst_entry *dst); static int xfrm_bundle_ok(struct xfrm_dst *xdst); static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, int dir); static inline int __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi4 *fl4 = &fl->u.ip4; return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) && addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) && !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && (fl4->flowi4_proto == sel->proto || !sel->proto) && (fl4->flowi4_oif == sel->ifindex || 
!sel->ifindex); } static inline int __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi6 *fl6 = &fl->u.ip6; return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) && addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) && !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) && !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) && (fl6->flowi6_proto == sel->proto || !sel->proto) && (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); } int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, unsigned short family) { switch (family) { case AF_INET: return __xfrm4_selector_match(sel, fl); case AF_INET6: return __xfrm6_selector_match(sel, fl); } return 0; } static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, const xfrm_address_t *saddr, const xfrm_address_t *daddr, int family) { struct xfrm_policy_afinfo *afinfo; struct dst_entry *dst; afinfo = xfrm_policy_get_afinfo(family); if (unlikely(afinfo == NULL)) return ERR_PTR(-EAFNOSUPPORT); dst = afinfo->dst_lookup(net, tos, saddr, daddr); xfrm_policy_put_afinfo(afinfo); return dst; } static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, xfrm_address_t *prev_saddr, xfrm_address_t *prev_daddr, int family) { struct net *net = xs_net(x); xfrm_address_t *saddr = &x->props.saddr; xfrm_address_t *daddr = &x->id.daddr; struct dst_entry *dst; if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) { saddr = x->coaddr; daddr = prev_daddr; } if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) { saddr = prev_saddr; daddr = x->coaddr; } dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family); if (!IS_ERR(dst)) { if (prev_saddr != saddr) memcpy(prev_saddr, saddr, sizeof(*prev_saddr)); if (prev_daddr != daddr) memcpy(prev_daddr, daddr, sizeof(*prev_daddr)); } return dst; } static inline unsigned long make_jiffies(long secs) { if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) return 
MAX_SCHEDULE_TIMEOUT-1; else return secs*HZ; } static void xfrm_policy_timer(unsigned long data) { struct xfrm_policy *xp = (struct xfrm_policy*)data; unsigned long now = get_seconds(); long next = LONG_MAX; int warn = 0; int dir; read_lock(&xp->lock); if (unlikely(xp->walk.dead)) goto out; dir = xfrm_policy_id2dir(xp->index); if (xp->lft.hard_add_expires_seconds) { long tmo = xp->lft.hard_add_expires_seconds + xp->curlft.add_time - now; if (tmo <= 0) goto expired; if (tmo < next) next = tmo; } if (xp->lft.hard_use_expires_seconds) { long tmo = xp->lft.hard_use_expires_seconds + (xp->curlft.use_time ? : xp->curlft.add_time) - now; if (tmo <= 0) goto expired; if (tmo < next) next = tmo; } if (xp->lft.soft_add_expires_seconds) { long tmo = xp->lft.soft_add_expires_seconds + xp->curlft.add_time - now; if (tmo <= 0) { warn = 1; tmo = XFRM_KM_TIMEOUT; } if (tmo < next) next = tmo; } if (xp->lft.soft_use_expires_seconds) { long tmo = xp->lft.soft_use_expires_seconds + (xp->curlft.use_time ? : xp->curlft.add_time) - now; if (tmo <= 0) { warn = 1; tmo = XFRM_KM_TIMEOUT; } if (tmo < next) next = tmo; } if (warn) km_policy_expired(xp, dir, 0, 0); if (next != LONG_MAX && !mod_timer(&xp->timer, jiffies + make_jiffies(next))) xfrm_pol_hold(xp); out: read_unlock(&xp->lock); xfrm_pol_put(xp); return; expired: read_unlock(&xp->lock); if (!xfrm_policy_delete(xp, dir)) km_policy_expired(xp, dir, 1, 0); xfrm_pol_put(xp); } static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) { struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); if (unlikely(pol->walk.dead)) flo = NULL; else xfrm_pol_hold(pol); return flo; } static int xfrm_policy_flo_check(struct flow_cache_object *flo) { struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); return !pol->walk.dead; } static void xfrm_policy_flo_delete(struct flow_cache_object *flo) { xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); } static const struct flow_cache_ops 
xfrm_policy_fc_ops = { .get = xfrm_policy_flo_get, .check = xfrm_policy_flo_check, .delete = xfrm_policy_flo_delete, }; /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 * SPD calls. */ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) { struct xfrm_policy *policy; policy = kzalloc(sizeof(struct xfrm_policy), gfp); if (policy) { write_pnet(&policy->xp_net, net); INIT_LIST_HEAD(&policy->walk.all); INIT_HLIST_NODE(&policy->bydst); INIT_HLIST_NODE(&policy->byidx); rwlock_init(&policy->lock); atomic_set(&policy->refcnt, 1); setup_timer(&policy->timer, xfrm_policy_timer, (unsigned long)policy); policy->flo.ops = &xfrm_policy_fc_ops; } return policy; } EXPORT_SYMBOL(xfrm_policy_alloc); /* Destroy xfrm_policy: descendant resources must be released to this moment. */ void xfrm_policy_destroy(struct xfrm_policy *policy) { BUG_ON(!policy->walk.dead); if (del_timer(&policy->timer)) BUG(); security_xfrm_policy_free(policy->security); kfree(policy); } EXPORT_SYMBOL(xfrm_policy_destroy); /* Rule must be locked. Release descentant resources, announce * entry dead. The rule must be unlinked from lists to the moment. */ static void xfrm_policy_kill(struct xfrm_policy *policy) { policy->walk.dead = 1; atomic_inc(&policy->genid); if (del_timer(&policy->timer)) xfrm_pol_put(policy); xfrm_pol_put(policy); } static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; static inline unsigned int idx_hash(struct net *net, u32 index) { return __idx_hash(index, net->xfrm.policy_idx_hmask); } static struct hlist_head *policy_hash_bysel(struct net *net, const struct xfrm_selector *sel, unsigned short family, int dir) { unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; unsigned int hash = __sel_hash(sel, family, hmask); return (hash == hmask + 1 ? 
&net->xfrm.policy_inexact[dir] : net->xfrm.policy_bydst[dir].table + hash); } static struct hlist_head *policy_hash_direct(struct net *net, const xfrm_address_t *daddr, const xfrm_address_t *saddr, unsigned short family, int dir) { unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; unsigned int hash = __addr_hash(daddr, saddr, family, hmask); return net->xfrm.policy_bydst[dir].table + hash; } static void xfrm_dst_hash_transfer(struct hlist_head *list, struct hlist_head *ndsttable, unsigned int nhashmask) { struct hlist_node *entry, *tmp, *entry0 = NULL; struct xfrm_policy *pol; unsigned int h0 = 0; redo: hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { unsigned int h; h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, pol->family, nhashmask); if (!entry0) { hlist_del(entry); hlist_add_head(&pol->bydst, ndsttable+h); h0 = h; } else { if (h != h0) continue; hlist_del(entry); hlist_add_after(entry0, &pol->bydst); } entry0 = entry; } if (!hlist_empty(list)) { entry0 = NULL; goto redo; } } static void xfrm_idx_hash_transfer(struct hlist_head *list, struct hlist_head *nidxtable, unsigned int nhashmask) { struct hlist_node *entry, *tmp; struct xfrm_policy *pol; hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { unsigned int h; h = __idx_hash(pol->index, nhashmask); hlist_add_head(&pol->byidx, nidxtable+h); } } static unsigned long xfrm_new_hash_mask(unsigned int old_hmask) { return ((old_hmask + 1) << 1) - 1; } static void xfrm_bydst_resize(struct net *net, int dir) { unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; unsigned int nhashmask = xfrm_new_hash_mask(hmask); unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); struct hlist_head *odst = net->xfrm.policy_bydst[dir].table; struct hlist_head *ndst = xfrm_hash_alloc(nsize); int i; if (!ndst) return; write_lock_bh(&xfrm_policy_lock); for (i = hmask; i >= 0; i--) xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); net->xfrm.policy_bydst[dir].table = ndst; 
net->xfrm.policy_bydst[dir].hmask = nhashmask; write_unlock_bh(&xfrm_policy_lock); xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); } static void xfrm_byidx_resize(struct net *net, int total) { unsigned int hmask = net->xfrm.policy_idx_hmask; unsigned int nhashmask = xfrm_new_hash_mask(hmask); unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); struct hlist_head *oidx = net->xfrm.policy_byidx; struct hlist_head *nidx = xfrm_hash_alloc(nsize); int i; if (!nidx) return; write_lock_bh(&xfrm_policy_lock); for (i = hmask; i >= 0; i--) xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); net->xfrm.policy_byidx = nidx; net->xfrm.policy_idx_hmask = nhashmask; write_unlock_bh(&xfrm_policy_lock); xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); } static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total) { unsigned int cnt = net->xfrm.policy_count[dir]; unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; if (total) *total += cnt; if ((hmask + 1) < xfrm_policy_hashmax && cnt > hmask) return 1; return 0; } static inline int xfrm_byidx_should_resize(struct net *net, int total) { unsigned int hmask = net->xfrm.policy_idx_hmask; if ((hmask + 1) < xfrm_policy_hashmax && total > hmask) return 1; return 0; } void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) { read_lock_bh(&xfrm_policy_lock); si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; si->spdhcnt = net->xfrm.policy_idx_hmask; si->spdhmcnt = xfrm_policy_hashmax; read_unlock_bh(&xfrm_policy_lock); } EXPORT_SYMBOL(xfrm_spd_getinfo); static DEFINE_MUTEX(hash_resize_mutex); static void xfrm_hash_resize(struct work_struct *work) { struct 
net *net = container_of(work, struct net, xfrm.policy_hash_work); int dir, total; mutex_lock(&hash_resize_mutex); total = 0; for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { if (xfrm_bydst_should_resize(net, dir, &total)) xfrm_bydst_resize(net, dir); } if (xfrm_byidx_should_resize(net, total)) xfrm_byidx_resize(net, total); mutex_unlock(&hash_resize_mutex); } /* Generate new index... KAME seems to generate them ordered by cost * of an absolute inpredictability of ordering of rules. This will not pass. */ static u32 xfrm_gen_index(struct net *net, int dir) { static u32 idx_generator; for (;;) { struct hlist_node *entry; struct hlist_head *list; struct xfrm_policy *p; u32 idx; int found; idx = (idx_generator | dir); idx_generator += 8; if (idx == 0) idx = 8; list = net->xfrm.policy_byidx + idx_hash(net, idx); found = 0; hlist_for_each_entry(p, entry, list, byidx) { if (p->index == idx) { found = 1; break; } } if (!found) return idx; } } static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2) { u32 *p1 = (u32 *) s1; u32 *p2 = (u32 *) s2; int len = sizeof(struct xfrm_selector) / sizeof(u32); int i; for (i = 0; i < len; i++) { if (p1[i] != p2[i]) return 1; } return 0; } int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) { struct net *net = xp_net(policy); struct xfrm_policy *pol; struct xfrm_policy *delpol; struct hlist_head *chain; struct hlist_node *entry, *newpos; u32 mark = policy->mark.v & policy->mark.m; write_lock_bh(&xfrm_policy_lock); chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); delpol = NULL; newpos = NULL; hlist_for_each_entry(pol, entry, chain, bydst) { if (pol->type == policy->type && !selector_cmp(&pol->selector, &policy->selector) && (mark & pol->mark.m) == pol->mark.v && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { if (excl) { write_unlock_bh(&xfrm_policy_lock); return -EEXIST; } delpol = pol; if (policy->priority > pol->priority) continue; } else if 
(policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol)
		__xfrm_policy_unlink(delpol, dir);
	/* A replaced policy keeps its index; otherwise allocate a new one. */
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

/* Find a policy by selector + security context (and mark/type/dir);
 * optionally unlink it.  Returns a held policy or NULL; *err carries
 * any LSM deletion error. */
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

/* Find a policy by its index (and mark/type/dir); optionally unlink it.
 * (body continues on the next chunk) */
struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if
(xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Flush pre-check: ask the LSM whether every policy of @type may be
 * deleted; audit and return the error on the first refusal so the
 * flush below can be aborted before touching anything. */
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

/* Delete every policy of @type in every direction, auditing each one.
 * (body continues on the next chunk) */
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

	again1:
hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			/* The lock is dropped around kill/audit, so the
			 * chain walk restarts from scratch each time. */
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

/* Resumable iteration over all policies: call func() for each policy
 * matching walk->type; a non-zero return from func() pauses the walk,
 * remembering the position in walk->walk.all for the next call. */
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	/* Already-finished walk (entry unlinked, seq advanced): no-op. */
	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Prepare a walker; walk.dead=1 keeps it invisible to live iteration
 * until the first xfrm_policy_walk() links it in. */
void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

/* Unlink a paused walker from the policy_all list, if still linked. */
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}

/* Look up the best-matching policy for a flow: first the exact
 * src/dst hash chain, then the inexact list, keeping whichever has the
 * lower (better) priority.  (body continues on the next chunk) */
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority)
{
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

/* Try the SUB policy type first (when configured), then MAIN. */
static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

/* Flow-cache resolver wrapper around __xfrm_policy_lookup(). */
static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, dir);
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}

/* Map an XFRM policy direction to the flow-cache direction constant. */
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

/* Match a per-socket policy against a flow; returns a held policy,
 * NULL on no match/mark mismatch/-ESRCH, or ERR_PTR on LSM error. */
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

/* Link a policy into all lookup structures; caller holds the policy
 * write lock.  (body continues on the next chunk) */
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net =
xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

/* Reverse of __xfrm_policy_link(); caller holds the policy write lock.
 * Returns the policy (reference transferred to the caller) or NULL if
 * it was not hashed. */
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

/* Unlink and kill a policy; -ENOENT when it was not linked. */
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

/* Install (or clear, when @pol is NULL) a per-socket policy; socket
 * policies live in the XFRM_POLICY_MAX+dir slots of the SPD. */
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

/* Duplicate a socket policy for a new socket (fork/accept path);
 * returns NULL on allocation or LSM-clone failure. */
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

/* Clone both directions of a socket's policies onto the new socket. */
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Ask the per-family afinfo for a source address toward @remote. */
static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy.
 */
/* Resolve one policy's template list into xfrm_states; returns the
 * number of states placed in @xfrm, or a negative errno (releasing any
 * states already acquired). */
static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote,
						       tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

/* Resolve the templates of all @npols policies into @xfrm, sorting the
 * combined state list for outbound processing when multiple policies
 * contribute. */
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;

}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

/* Fetch the flow's TOS via the per-family afinfo; -EINVAL when the
 * family has no afinfo registered. */
static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

/* Flow-cache 'get' op for bundles: return the bundle with a hold if it
 * is still usable, NULL to force re-resolution. */
static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not
		 * able to build bundle as template resolution failed.
		 * It means we need to try again resolving. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

/* Flow-cache 'check' op: non-dummy bundles must not be stale. */
static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

/* Flow-cache 'delete' op: drop the cache's reference on the bundle. */
static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

/* Allocate a zeroed xfrm_dst for @family with the bundle flow-cache
 * ops attached; ERR_PTR on unknown family or allocation failure. */
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);

	if (likely(xdst)) {
		memset(&xdst->u.rt6.rt6i_table, 0,
			sizeof(*xdst) - sizeof(struct dst_entry));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return
xdst;
}

/* Per-family hook to initialize the path portion of a bundle. */
static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Per-family hook to fill device-dependent fields of a bundle entry. */
static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}


/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}
		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family
= xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

/* Lazily allocate *target and copy @size bytes of @src into it. */
static int inline xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

/* Remember the sub-policy's selector on the bundle (SUB_POLICY only). */
static int inline xfrm_dst_update_parent(struct dst_entry *dst,
					 const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

/* Remember the originating flow on the bundle (SUB_POLICY only).
 * (body continues on the next chunk) */
static int inline xfrm_dst_update_origin(struct dst_entry *dst,
					 const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst
*xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin),
				   fl, sizeof(*fl));
#else
	return 0;
#endif
}

/* Expand pols[0] into the full policy set: when it is a SUB policy
 * that allows the flow, also look up the MAIN policy.  On return,
 * *num_pols/*num_xfrms describe the set; *num_xfrms is -1 when any
 * policy blocks the flow. */
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols) ++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}

/* Resolve the policies' templates and build a dst bundle around
 * @dst_orig; ERR_PTR on failure.  (body continues on the next chunk) */
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols,
	       sizeof(struct xfrm_policy*) * num_pols);
	xdst->policy_genid =
atomic_read(&pols[0]->genid);

	return xdst;
}

/* Flow-cache resolver for output bundles: reuse the cached bundle's
 * policies when still live, otherwise re-resolve; falls back to a
 * "dummy" bundle when templates cannot be resolved yet. */
static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* Flow cache does not have reference, it dst_free()'s,
	 * but we do need to return one reference for original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there's no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we
could not build template (no xfrm_states).*/
	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols,
	       sizeof(struct xfrm_policy*) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}

/* Swap @dst_orig for the family's blackhole route (used by the
 * larval-drop path); always consumes the @dst_orig reference. */
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		ret = ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

restart:
	dst = NULL;
	xdst = NULL;
	route = NULL;

	/* Socket policies take precedence over the SPD/flow cache. */
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			/* Socket bundles are tracked on a private list
			 * rather than in the flow cache. */
			dst_hold(&xdst->u.dst);

			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
			xdst->u.dst.next = xfrm_policy_sk_bundles;
			xfrm_policy_sk_bundles = &xdst->u.dst;
			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, dst_orig);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's.
We need to wait for KM to
		 * negotiate new SA's or bail out with error.*/
		if (net->xfrm.sysctl_larval_drop) {
			/* EREMOTE tells the caller to generate
			 * a one-shot blackhole route. */
			dst_release(dst);
			xfrm_pols_put(pols, drop_pols);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

			return make_blackhole(net, family, dst_orig);
		}
		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
			DECLARE_WAITQUEUE(wait, current);

			/* Sleep until a key manager event, then retry
			 * the whole lookup from the top. */
			add_wait_queue(&net->xfrm.km_waitq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&net->xfrm.km_waitq, &wait);

			if (!signal_pending(current)) {
				dst_release(dst);
				goto restart;
			}

			err = -ERESTART;
		} else
			err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);

/* Invoke the xfrm type's reject handler for secpath entry @idx, if any. */
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me.
:-) Of course, connected sockets must
 * have policy cached at them.
 */

/* Does state @x satisfy template @tmpl for @family? */
static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation is succeeded (either bypass
 * because of optional transport mode, or next index of the matched secpath
 * state with the template.
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

/* Decode a packet into a flowi via the family's afinfo, then let the
 * LSM attach a security id to the flow. */
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

/* Starting at index @k, find the first non-transport-mode secpath
 * entry; store its index in *idxp and return 1, else 0.
 * (final return is on the next chunk) */
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}
return 0; } int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) { struct net *net = dev_net(skb->dev); struct xfrm_policy *pol; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int npols = 0; int xfrm_nr; int pi; int reverse; struct flowi fl; u8 fl_dir; int xerr_idx = -1; reverse = dir & ~XFRM_POLICY_MASK; dir &= XFRM_POLICY_MASK; fl_dir = policy_to_flow_dir(dir); if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); return 0; } nf_nat_decode_session(skb, &fl, family); /* First, check used SA against their selectors. */ if (skb->sp) { int i; for (i=skb->sp->len-1; i>=0; i--) { struct xfrm_state *x = skb->sp->xvec[i]; if (!xfrm_selector_match(&x->sel, &fl, family)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); return 0; } } } pol = NULL; if (sk && sk->sk_policy[dir]) { pol = xfrm_sk_policy_lookup(sk, dir, &fl); if (IS_ERR(pol)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); return 0; } } if (!pol) { struct flow_cache_object *flo; flo = flow_cache_lookup(net, &fl, family, fl_dir, xfrm_policy_lookup, NULL); if (IS_ERR_OR_NULL(flo)) pol = ERR_CAST(flo); else pol = container_of(flo, struct xfrm_policy, flo); } if (IS_ERR(pol)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); return 0; } if (!pol) { if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { xfrm_secpath_reject(xerr_idx, skb, &fl); XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); return 0; } return 1; } pol->curlft.use_time = get_seconds(); pols[0] = pol; npols ++; #ifdef CONFIG_XFRM_SUB_POLICY if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, &fl, family, XFRM_POLICY_IN); if (pols[1]) { if (IS_ERR(pols[1])) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); return 0; } pols[1]->curlft.use_time = get_seconds(); npols ++; } } #endif if (pol->action == XFRM_POLICY_ALLOW) { struct sec_path *sp; static struct sec_path dummy; struct xfrm_tmpl 
*tp[XFRM_MAX_DEPTH]; struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; struct xfrm_tmpl **tpp = tp; int ti = 0; int i, k; if ((sp = skb->sp) == NULL) sp = &dummy; for (pi = 0; pi < npols; pi++) { if (pols[pi] != pol && pols[pi]->action != XFRM_POLICY_ALLOW) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); goto reject; } if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); goto reject_error; } for (i = 0; i < pols[pi]->xfrm_nr; i++) tpp[ti++] = &pols[pi]->xfrm_vec[i]; } xfrm_nr = ti; if (npols > 1) { xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); tpp = stp; } /* For each tunnel xfrm, find the first matching tmpl. * For each tmpl before that, find corresponding xfrm. * Order is _important_. Later we will implement * some barriers, but at the moment barriers * are implied between each two transformations. */ for (i = xfrm_nr-1, k = 0; i >= 0; i--) { k = xfrm_policy_ok(tpp[i], sp, k, family); if (k < 0) { if (k < -1) /* "-2 - errored_index" returned */ xerr_idx = -(2+k); XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); goto reject; } } if (secpath_has_nontransport(sp, k, &xerr_idx)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); goto reject; } xfrm_pols_put(pols, npols); return 1; } XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); reject: xfrm_secpath_reject(xerr_idx, skb, &fl); reject_error: xfrm_pols_put(pols, npols); return 0; } EXPORT_SYMBOL(__xfrm_policy_check); int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) { struct net *net = dev_net(skb->dev); struct flowi fl; struct dst_entry *dst; int res = 1; if (xfrm_decode_session(skb, &fl, family) < 0) { XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); return 0; } skb_dst_force(skb); dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); if (IS_ERR(dst)) { res = 0; dst = NULL; } skb_dst_set(skb, dst); return res; } EXPORT_SYMBOL(__xfrm_route_forward); /* Optimize later using cookies and generation ids. 
*/ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) { /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete * to "-1" to force all XFRM destinations to get validated by * dst_ops->check on every use. We do this because when a * normal route referenced by an XFRM dst is obsoleted we do * not go looking around for all parent referencing XFRM dsts * so that we can invalidate them. It is just too much work. * Instead we make the checks here on every use. For example: * * XFRM dst A --> IPv4 dst X * * X is the "xdst->route" of A (X is also the "dst->path" of A * in this example). If X is marked obsolete, "A" will not * notice. That's what we are validating here via the * stale_bundle() check. * * When a policy's bundle is pruned, we dst_free() the XFRM * dst which causes it's ->obsolete field to be set to a * positive non-zero integer. If an XFRM dst has been pruned * like this, we want to force a new route lookup. */ if (dst->obsolete < 0 && !stale_bundle(dst)) return dst; return NULL; } static int stale_bundle(struct dst_entry *dst) { return !xfrm_bundle_ok((struct xfrm_dst *)dst); } void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) { while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { dst->dev = dev_net(dev)->loopback_dev; dev_hold(dst->dev); dev_put(dev); } } EXPORT_SYMBOL(xfrm_dst_ifdown); static void xfrm_link_failure(struct sk_buff *skb) { /* Impossible. Such dst must be popped before reaches point of failure. 
*/ } static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) { if (dst) { if (dst->obsolete) { dst_release(dst); dst = NULL; } } return dst; } static void __xfrm_garbage_collect(struct net *net) { struct dst_entry *head, *next; spin_lock_bh(&xfrm_policy_sk_bundle_lock); head = xfrm_policy_sk_bundles; xfrm_policy_sk_bundles = NULL; spin_unlock_bh(&xfrm_policy_sk_bundle_lock); while (head) { next = head->next; dst_free(head); head = next; } } static void xfrm_garbage_collect(struct net *net) { flow_cache_flush(); __xfrm_garbage_collect(net); } static void xfrm_garbage_collect_deferred(struct net *net) { flow_cache_flush_deferred(); __xfrm_garbage_collect(net); } static void xfrm_init_pmtu(struct dst_entry *dst) { do { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; u32 pmtu, route_mtu_cached; pmtu = dst_mtu(dst->child); xdst->child_mtu_cached = pmtu; pmtu = xfrm_state_mtu(dst->xfrm, pmtu); route_mtu_cached = dst_mtu(xdst->route); xdst->route_mtu_cached = route_mtu_cached; if (pmtu > route_mtu_cached) pmtu = route_mtu_cached; dst_metric_set(dst, RTAX_MTU, pmtu); } while ((dst = dst->next)); } /* Check that the bundle accepts the flow and its components are * still valid. 
*/ static int xfrm_bundle_ok(struct xfrm_dst *first) { struct dst_entry *dst = &first->u.dst; struct xfrm_dst *last; u32 mtu; if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) || (dst->dev && !netif_running(dst->dev))) return 0; last = NULL; do { struct xfrm_dst *xdst = (struct xfrm_dst *)dst; if (dst->xfrm->km.state != XFRM_STATE_VALID) return 0; if (xdst->xfrm_genid != dst->xfrm->genid) return 0; if (xdst->num_pols > 0 && xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) return 0; mtu = dst_mtu(dst->child); if (xdst->child_mtu_cached != mtu) { last = xdst; xdst->child_mtu_cached = mtu; } if (!dst_check(xdst->route, xdst->route_cookie)) return 0; mtu = dst_mtu(xdst->route); if (xdst->route_mtu_cached != mtu) { last = xdst; xdst->route_mtu_cached = mtu; } dst = dst->child; } while (dst->xfrm); if (likely(!last)) return 1; mtu = last->child_mtu_cached; for (;;) { dst = &last->u.dst; mtu = xfrm_state_mtu(dst->xfrm, mtu); if (mtu > last->route_mtu_cached) mtu = last->route_mtu_cached; dst_metric_set(dst, RTAX_MTU, mtu); if (last == first) break; last = (struct xfrm_dst *)last->u.dst.next; last->child_mtu_cached = mtu; } return 1; } static unsigned int xfrm_default_advmss(const struct dst_entry *dst) { return dst_metric_advmss(dst->path); } static unsigned int xfrm_mtu(const struct dst_entry *dst) { unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); return mtu ? 
: dst_mtu(dst->path); } static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) { return dst_neigh_lookup(dst->path, daddr); } int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) { struct net *net; int err = 0; if (unlikely(afinfo == NULL)) return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; write_lock_bh(&xfrm_policy_afinfo_lock); if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) err = -ENOBUFS; else { struct dst_ops *dst_ops = afinfo->dst_ops; if (likely(dst_ops->kmem_cachep == NULL)) dst_ops->kmem_cachep = xfrm_dst_cache; if (likely(dst_ops->check == NULL)) dst_ops->check = xfrm_dst_check; if (likely(dst_ops->default_advmss == NULL)) dst_ops->default_advmss = xfrm_default_advmss; if (likely(dst_ops->mtu == NULL)) dst_ops->mtu = xfrm_mtu; if (likely(dst_ops->negative_advice == NULL)) dst_ops->negative_advice = xfrm_negative_advice; if (likely(dst_ops->link_failure == NULL)) dst_ops->link_failure = xfrm_link_failure; if (likely(dst_ops->neigh_lookup == NULL)) dst_ops->neigh_lookup = xfrm_neigh_lookup; if (likely(afinfo->garbage_collect == NULL)) afinfo->garbage_collect = xfrm_garbage_collect_deferred; xfrm_policy_afinfo[afinfo->family] = afinfo; } write_unlock_bh(&xfrm_policy_afinfo_lock); rtnl_lock(); for_each_net(net) { struct dst_ops *xfrm_dst_ops; switch (afinfo->family) { case AF_INET: xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; break; #endif default: BUG(); } *xfrm_dst_ops = *afinfo->dst_ops; } rtnl_unlock(); return err; } EXPORT_SYMBOL(xfrm_policy_register_afinfo); int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) { int err = 0; if (unlikely(afinfo == NULL)) return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; write_lock_bh(&xfrm_policy_afinfo_lock); if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { if 
(unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) err = -EINVAL; else { struct dst_ops *dst_ops = afinfo->dst_ops; xfrm_policy_afinfo[afinfo->family] = NULL; dst_ops->kmem_cachep = NULL; dst_ops->check = NULL; dst_ops->negative_advice = NULL; dst_ops->link_failure = NULL; afinfo->garbage_collect = NULL; } } write_unlock_bh(&xfrm_policy_afinfo_lock); return err; } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); static void __net_init xfrm_dst_ops_init(struct net *net) { struct xfrm_policy_afinfo *afinfo; read_lock_bh(&xfrm_policy_afinfo_lock); afinfo = xfrm_policy_afinfo[AF_INET]; if (afinfo) net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; #if IS_ENABLED(CONFIG_IPV6) afinfo = xfrm_policy_afinfo[AF_INET6]; if (afinfo) net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; #endif read_unlock_bh(&xfrm_policy_afinfo_lock); } static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) { struct xfrm_policy_afinfo *afinfo; if (unlikely(family >= NPROTO)) return NULL; read_lock(&xfrm_policy_afinfo_lock); afinfo = xfrm_policy_afinfo[family]; if (unlikely(!afinfo)) read_unlock(&xfrm_policy_afinfo_lock); return afinfo; } static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) { read_unlock(&xfrm_policy_afinfo_lock); } static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; switch (event) { case NETDEV_DOWN: xfrm_garbage_collect(dev_net(dev)); } return NOTIFY_DONE; } static struct notifier_block xfrm_dev_notifier = { .notifier_call = xfrm_dev_event, }; #ifdef CONFIG_XFRM_STATISTICS static int __net_init xfrm_statistics_init(struct net *net) { int rv; if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, sizeof(struct linux_xfrm_mib), __alignof__(struct linux_xfrm_mib)) < 0) return -ENOMEM; rv = xfrm_proc_init(net); if (rv < 0) snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); return rv; } static void xfrm_statistics_fini(struct net *net) { xfrm_proc_fini(net); 
snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); } #else static int __net_init xfrm_statistics_init(struct net *net) { return 0; } static void xfrm_statistics_fini(struct net *net) { } #endif static int __net_init xfrm_policy_init(struct net *net) { unsigned int hmask, sz; int dir; if (net_eq(net, &init_net)) xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", sizeof(struct xfrm_dst), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); hmask = 8 - 1; sz = (hmask+1) * sizeof(struct hlist_head); net->xfrm.policy_byidx = xfrm_hash_alloc(sz); if (!net->xfrm.policy_byidx) goto out_byidx; net->xfrm.policy_idx_hmask = hmask; for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { struct xfrm_policy_hash *htab; net->xfrm.policy_count[dir] = 0; INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); htab = &net->xfrm.policy_bydst[dir]; htab->table = xfrm_hash_alloc(sz); if (!htab->table) goto out_bydst; htab->hmask = hmask; } INIT_LIST_HEAD(&net->xfrm.policy_all); INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); if (net_eq(net, &init_net)) register_netdevice_notifier(&xfrm_dev_notifier); return 0; out_bydst: for (dir--; dir >= 0; dir--) { struct xfrm_policy_hash *htab; htab = &net->xfrm.policy_bydst[dir]; xfrm_hash_free(htab->table, sz); } xfrm_hash_free(net->xfrm.policy_byidx, sz); out_byidx: return -ENOMEM; } static void xfrm_policy_fini(struct net *net) { struct xfrm_audit audit_info; unsigned int sz; int dir; flush_work(&net->xfrm.policy_hash_work); #ifdef CONFIG_XFRM_SUB_POLICY audit_info.loginuid = -1; audit_info.sessionid = -1; audit_info.secid = 0; xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); #endif audit_info.loginuid = -1; audit_info.sessionid = -1; audit_info.secid = 0; xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); WARN_ON(!list_empty(&net->xfrm.policy_all)); for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { struct xfrm_policy_hash *htab; WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); htab = &net->xfrm.policy_bydst[dir]; sz = 
(htab->hmask + 1); WARN_ON(!hlist_empty(htab->table)); xfrm_hash_free(htab->table, sz); } sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); xfrm_hash_free(net->xfrm.policy_byidx, sz); } static int __net_init xfrm_net_init(struct net *net) { int rv; rv = xfrm_statistics_init(net); if (rv < 0) goto out_statistics; rv = xfrm_state_init(net); if (rv < 0) goto out_state; rv = xfrm_policy_init(net); if (rv < 0) goto out_policy; xfrm_dst_ops_init(net); rv = xfrm_sysctl_init(net); if (rv < 0) goto out_sysctl; return 0; out_sysctl: xfrm_policy_fini(net); out_policy: xfrm_state_fini(net); out_state: xfrm_statistics_fini(net); out_statistics: return rv; } static void __net_exit xfrm_net_exit(struct net *net) { xfrm_sysctl_fini(net); xfrm_policy_fini(net); xfrm_state_fini(net); xfrm_statistics_fini(net); } static struct pernet_operations __net_initdata xfrm_net_ops = { .init = xfrm_net_init, .exit = xfrm_net_exit, }; void __init xfrm_init(void) { register_pernet_subsys(&xfrm_net_ops); xfrm_input_init(); } #ifdef CONFIG_AUDITSYSCALL static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, struct audit_buffer *audit_buf) { struct xfrm_sec_ctx *ctx = xp->security; struct xfrm_selector *sel = &xp->selector; if (ctx) audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); switch(sel->family) { case AF_INET: audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4); if (sel->prefixlen_s != 32) audit_log_format(audit_buf, " src_prefixlen=%d", sel->prefixlen_s); audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4); if (sel->prefixlen_d != 32) audit_log_format(audit_buf, " dst_prefixlen=%d", sel->prefixlen_d); break; case AF_INET6: audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); if (sel->prefixlen_s != 128) audit_log_format(audit_buf, " src_prefixlen=%d", sel->prefixlen_s); audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); if (sel->prefixlen_d 
!= 128) audit_log_format(audit_buf, " dst_prefixlen=%d", sel->prefixlen_d); break; } } void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, uid_t auid, u32 sessionid, u32 secid) { struct audit_buffer *audit_buf; audit_buf = xfrm_audit_start("SPD-add"); if (audit_buf == NULL) return; xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); audit_log_format(audit_buf, " res=%u", result); xfrm_audit_common_policyinfo(xp, audit_buf); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, uid_t auid, u32 sessionid, u32 secid) { struct audit_buffer *audit_buf; audit_buf = xfrm_audit_start("SPD-delete"); if (audit_buf == NULL) return; xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); audit_log_format(audit_buf, " res=%u", result); xfrm_audit_common_policyinfo(xp, audit_buf); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); #endif #ifdef CONFIG_XFRM_MIGRATE static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, const struct xfrm_selector *sel_tgt) { if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { if (sel_tgt->family == sel_cmp->family && xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr, sel_cmp->family) == 0 && xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr, sel_cmp->family) == 0 && sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { return 1; } } else { if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { return 1; } } return 0; } static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, u8 dir, u8 type) { struct xfrm_policy *pol, *ret = NULL; struct hlist_node *entry; struct hlist_head *chain; u32 priority = ~0U; read_lock_bh(&xfrm_policy_lock); chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); hlist_for_each_entry(pol, entry, chain, bydst) { if (xfrm_migrate_selector_match(sel, &pol->selector) && pol->type == type) { ret 
= pol; priority = ret->priority; break; } } chain = &init_net.xfrm.policy_inexact[dir]; hlist_for_each_entry(pol, entry, chain, bydst) { if (xfrm_migrate_selector_match(sel, &pol->selector) && pol->type == type && pol->priority < priority) { ret = pol; break; } } if (ret) xfrm_pol_hold(ret); read_unlock_bh(&xfrm_policy_lock); return ret; } static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t) { int match = 0; if (t->mode == m->mode && t->id.proto == m->proto && (m->reqid == 0 || t->reqid == m->reqid)) { switch (t->mode) { case XFRM_MODE_TUNNEL: case XFRM_MODE_BEET: if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr, m->old_family) == 0 && xfrm_addr_cmp(&t->saddr, &m->old_saddr, m->old_family) == 0) { match = 1; } break; case XFRM_MODE_TRANSPORT: /* in case of transport mode, template does not store any IP addresses, hence we just compare mode and protocol */ match = 1; break; default: break; } } return match; } /* update endpoint address(es) of template(s) */ static int xfrm_policy_migrate(struct xfrm_policy *pol, struct xfrm_migrate *m, int num_migrate) { struct xfrm_migrate *mp; int i, j, n = 0; write_lock_bh(&pol->lock); if (unlikely(pol->walk.dead)) { /* target policy has been deleted */ write_unlock_bh(&pol->lock); return -ENOENT; } for (i = 0; i < pol->xfrm_nr; i++) { for (j = 0, mp = m; j < num_migrate; j++, mp++) { if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i])) continue; n++; if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL && pol->xfrm_vec[i].mode != XFRM_MODE_BEET) continue; /* update endpoints */ memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr, sizeof(pol->xfrm_vec[i].id.daddr)); memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr, sizeof(pol->xfrm_vec[i].saddr)); pol->xfrm_vec[i].encap_family = mp->new_family; /* flush bundles */ atomic_inc(&pol->genid); } } write_unlock_bh(&pol->lock); if (!n) return -ENODATA; return 0; } static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) { int i, j; if (num_migrate < 1 
|| num_migrate > XFRM_MAX_DEPTH) return -EINVAL; for (i = 0; i < num_migrate; i++) { if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr, m[i].old_family) == 0) && (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr, m[i].old_family) == 0)) return -EINVAL; if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) return -EINVAL; /* check if there is any duplicated entry */ for (j = i + 1; j < num_migrate; j++) { if (!memcmp(&m[i].old_daddr, &m[j].old_daddr, sizeof(m[i].old_daddr)) && !memcmp(&m[i].old_saddr, &m[j].old_saddr, sizeof(m[i].old_saddr)) && m[i].proto == m[j].proto && m[i].mode == m[j].mode && m[i].reqid == m[j].reqid && m[i].old_family == m[j].old_family) return -EINVAL; } } return 0; } int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_migrate, struct xfrm_kmaddress *k) { int i, err, nx_cur = 0, nx_new = 0; struct xfrm_policy *pol = NULL; struct xfrm_state *x, *xc; struct xfrm_state *x_cur[XFRM_MAX_DEPTH]; struct xfrm_state *x_new[XFRM_MAX_DEPTH]; struct xfrm_migrate *mp; if ((err = xfrm_migrate_check(m, num_migrate)) < 0) goto out; /* Stage 1 - find policy */ if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) { err = -ENOENT; goto out; } /* Stage 2 - find and update state(s) */ for (i = 0, mp = m; i < num_migrate; i++, mp++) { if ((x = xfrm_migrate_state_find(mp))) { x_cur[nx_cur] = x; nx_cur++; if ((xc = xfrm_state_migrate(x, mp))) { x_new[nx_new] = xc; nx_new++; } else { err = -ENODATA; goto restore_state; } } } /* Stage 3 - update policy */ if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) goto restore_state; /* Stage 4 - delete old state(s) */ if (nx_cur) { xfrm_states_put(x_cur, nx_cur); xfrm_states_delete(x_cur, nx_cur); } /* Stage 5 - announce */ km_migrate(sel, dir, type, m, num_migrate, k); xfrm_pol_put(pol); return 0; out: return err; restore_state: if (pol) xfrm_pol_put(pol); if (nx_cur) xfrm_states_put(x_cur, nx_cur); if (nx_new) 
xfrm_states_delete(x_new, nx_new); return err; } EXPORT_SYMBOL(xfrm_migrate); #endif
gpl-2.0
Restorn/android_kernel_elephone_p8000
arch/arm/mach-imx/devices/platform-ipu-core.c
3566
3115
/* * Copyright (C) 2011 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <linux/dma-mapping.h> #include "../hardware.h" #include "devices-common.h" #define imx_ipu_core_entry_single(soc) \ { \ .iobase = soc ## _IPU_CTRL_BASE_ADDR, \ .synirq = soc ## _INT_IPU_SYN, \ .errirq = soc ## _INT_IPU_ERR, \ } #ifdef CONFIG_SOC_IMX31 const struct imx_ipu_core_data imx31_ipu_core_data __initconst = imx_ipu_core_entry_single(MX31); #endif #ifdef CONFIG_SOC_IMX35 const struct imx_ipu_core_data imx35_ipu_core_data __initconst = imx_ipu_core_entry_single(MX35); #endif static struct platform_device *imx_ipu_coredev __initdata; struct platform_device *__init imx_add_ipu_core( const struct imx_ipu_core_data *data) { /* The resource order is important! */ struct resource res[] = { { .start = data->iobase, .end = data->iobase + 0x5f, .flags = IORESOURCE_MEM, }, { .start = data->iobase + 0x88, .end = data->iobase + 0xb3, .flags = IORESOURCE_MEM, }, { .start = data->synirq, .end = data->synirq, .flags = IORESOURCE_IRQ, }, { .start = data->errirq, .end = data->errirq, .flags = IORESOURCE_IRQ, }, }; return imx_ipu_coredev = imx_add_platform_device("ipu-core", -1, res, ARRAY_SIZE(res), NULL, 0); } struct platform_device *__init imx_alloc_mx3_camera( const struct imx_ipu_core_data *data, const struct mx3_camera_pdata *pdata) { struct resource res[] = { { .start = data->iobase + 0x60, .end = data->iobase + 0x87, .flags = IORESOURCE_MEM, }, }; int ret = -ENOMEM; struct platform_device *pdev; if (IS_ERR_OR_NULL(imx_ipu_coredev)) return ERR_PTR(-ENODEV); pdev = platform_device_alloc("mx3-camera", 0); if (!pdev) return ERR_PTR(-ENOMEM); pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); if (!pdev->dev.dma_mask) goto err; *pdev->dev.dma_mask = DMA_BIT_MASK(32); 
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) goto err; if (pdata) { struct mx3_camera_pdata *copied_pdata; ret = platform_device_add_data(pdev, pdata, sizeof(*pdata)); if (ret) { err: kfree(pdev->dev.dma_mask); platform_device_put(pdev); return ERR_PTR(-ENODEV); } copied_pdata = dev_get_platdata(&pdev->dev); copied_pdata->dma_dev = &imx_ipu_coredev->dev; } return pdev; } struct platform_device *__init imx_add_mx3_sdc_fb( const struct imx_ipu_core_data *data, struct mx3fb_platform_data *pdata) { struct resource res[] = { { .start = data->iobase + 0xb4, .end = data->iobase + 0x1bf, .flags = IORESOURCE_MEM, }, }; if (IS_ERR_OR_NULL(imx_ipu_coredev)) return ERR_PTR(-ENODEV); pdata->dma_dev = &imx_ipu_coredev->dev; return imx_add_platform_device_dmamask("mx3_sdc_fb", -1, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); }
gpl-2.0
CarbonROM/android_kernel_asus_fugu
drivers/infiniband/hw/ipath/ipath_init_chip.c
4078
33216
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/vmalloc.h> #include "ipath_kernel.h" #include "ipath_common.h" /* * min buffers we want to have per port, after driver */ #define IPATH_MIN_USER_PORT_BUFCNT 7 /* * Number of ports we are configured to use (to allow for more pio * buffers per port, etc.) Zero means use chip value. 
*/ static ushort ipath_cfgports; module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO); MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); /* * Number of buffers reserved for driver (verbs and layered drivers.) * Initialized based on number of PIO buffers if not set via module interface. * The problem with this is that it's global, but we'll use different * numbers for different chip types. */ static ushort ipath_kpiobufs; static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort, &ipath_kpiobufs, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); /** * create_port0_egr - allocate the eager TID buffers * @dd: the infinipath device * * This code is now quite different for user and kernel, because * the kernel uses skb's, for the accelerated network performance. * This is the kernel (port0) version. * * Allocate the eager TID buffers and program them into infinipath. * We use the network layer alloc_skb() allocator to allocate the * memory, and either use the buffers as is for things like verbs * packets, or pass the buffers up to the ipath layered driver and * thence the network layer, replacing them as we do so (see * ipath_rcv_layer()). */ static int create_port0_egr(struct ipath_devdata *dd) { unsigned e, egrcnt; struct ipath_skbinfo *skbinfo; int ret; egrcnt = dd->ipath_p0_rcvegrcnt; skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt); if (skbinfo == NULL) { ipath_dev_err(dd, "allocation error for eager TID " "skb array\n"); ret = -ENOMEM; goto bail; } for (e = 0; e < egrcnt; e++) { /* * This is a bit tricky in that we allocate extra * space for 2 bytes of the 14 byte ethernet header. * These two bytes are passed in the ipath header so * the rest of the data is word aligned. We allocate * 4 bytes so that the data buffer stays word aligned. * See ipath_kreceive() for more details. 
*/ skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL); if (!skbinfo[e].skb) { ipath_dev_err(dd, "SKB allocation error for " "eager TID %u\n", e); while (e != 0) dev_kfree_skb(skbinfo[--e].skb); vfree(skbinfo); ret = -ENOMEM; goto bail; } } /* * After loop above, so we can test non-NULL to see if ready * to use at receive, etc. */ dd->ipath_port0_skbinfo = skbinfo; for (e = 0; e < egrcnt; e++) { dd->ipath_port0_skbinfo[e].phys = ipath_map_single(dd->pcidev, dd->ipath_port0_skbinfo[e].skb->data, dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE); dd->ipath_f_put_tid(dd, e + (u64 __iomem *) ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase), RCVHQ_RCV_TYPE_EAGER, dd->ipath_port0_skbinfo[e].phys); } ret = 0; bail: return ret; } static int bringup_link(struct ipath_devdata *dd) { u64 val, ibc; int ret = 0; /* hold IBC in reset */ dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); /* * set initial max size pkt IBC will send, including ICRC; it's the * PIO buffer size in dwords, less 1; also see ipath_set_mtu() */ val = (dd->ipath_ibmaxlen >> 2) + 1; ibc = val << dd->ibcc_mpl_shift; /* flowcontrolwatermark is in units of KBytes */ ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT; /* * How often flowctrl sent. More or less in usecs; balance against * watermark value, so that in theory senders always get a flow * control update in time to not let the IB link go idle. */ ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT; /* max error tolerance */ ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; /* use "real" buffer space for */ ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT; /* IB credit flow control. */ ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; /* initially come up waiting for TS1, without sending anything. */ dd->ipath_ibcctrl = ibc; /* * Want to start out with both LINKCMD and LINKINITCMD in NOP * (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that * to stay a NOP. 
Flag that we are disabled, for the (unlikely) * case that some recovery path is trying to bring the link up * before we are ready. */ ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE << INFINIPATH_IBCC_LINKINITCMD_SHIFT; dd->ipath_flags |= IPATH_IB_LINK_DISABLED; ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n", (unsigned long long) ibc); ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc); // be sure chip saw it val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); ret = dd->ipath_f_bringup_serdes(dd); if (ret) dev_info(&dd->pcidev->dev, "Could not initialize SerDes, " "not usable\n"); else { /* enable IBC */ dd->ipath_control |= INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); } return ret; } static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd) { struct ipath_portdata *pd = NULL; pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (pd) { pd->port_dd = dd; pd->port_cnt = 1; /* The port 0 pkey table is used by the layer interface. */ pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY; pd->port_seq_cnt = 1; } return pd; } static int init_chip_first(struct ipath_devdata *dd) { struct ipath_portdata *pd; int ret = 0; u64 val; spin_lock_init(&dd->ipath_kernel_tid_lock); spin_lock_init(&dd->ipath_user_tid_lock); spin_lock_init(&dd->ipath_sendctrl_lock); spin_lock_init(&dd->ipath_uctxt_lock); spin_lock_init(&dd->ipath_sdma_lock); spin_lock_init(&dd->ipath_gpio_lock); spin_lock_init(&dd->ipath_eep_st_lock); spin_lock_init(&dd->ipath_sdepb_lock); mutex_init(&dd->ipath_eep_lock); /* * skip cfgports stuff because we are not allocating memory, * and we don't want problems if the portcnt changed due to * cfgports. We do still check and report a difference, if * not same (should be impossible). 
*/ dd->ipath_f_config_ports(dd, ipath_cfgports); if (!ipath_cfgports) dd->ipath_cfgports = dd->ipath_portcnt; else if (ipath_cfgports <= dd->ipath_portcnt) { dd->ipath_cfgports = ipath_cfgports; ipath_dbg("Configured to use %u ports out of %u in chip\n", dd->ipath_cfgports, ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt)); } else { dd->ipath_cfgports = dd->ipath_portcnt; ipath_dbg("Tried to configured to use %u ports; chip " "only supports %u\n", ipath_cfgports, ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt)); } /* * Allocate full portcnt array, rather than just cfgports, because * cleanup iterates across all possible ports. */ dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt, GFP_KERNEL); if (!dd->ipath_pd) { ipath_dev_err(dd, "Unable to allocate portdata array, " "failing\n"); ret = -ENOMEM; goto done; } pd = create_portdata0(dd); if (!pd) { ipath_dev_err(dd, "Unable to allocate portdata for port " "0, failing\n"); ret = -ENOMEM; goto done; } dd->ipath_pd[0] = pd; dd->ipath_rcvtidcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); dd->ipath_rcvtidbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); dd->ipath_rcvegrcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); dd->ipath_rcvegrbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); dd->ipath_palign = ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign); dd->ipath_piobufbase = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize); dd->ipath_piosize2k = val & ~0U; dd->ipath_piosize4k = val >> 32; if (dd->ipath_piosize4k == 0 && ipath_mtu4096) ipath_mtu4096 = 0; /* 4KB not supported by this chip */ dd->ipath_ibmtu = ipath_mtu4096 ? 
4096 : 2048; val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt); dd->ipath_piobcnt2k = val & ~0U; dd->ipath_piobcnt4k = val >> 32; dd->ipath_pio2kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase & 0xffffffff)); if (dd->ipath_piobcnt4k) { dd->ipath_pio4kbase = (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) + (dd->ipath_piobufbase >> 32)); /* * 4K buffers take 2 pages; we use roundup just to be * paranoid; we calculate it once here, rather than on * ever buf allocate */ dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k, dd->ipath_palign); ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p " "(%x aligned)\n", dd->ipath_piobcnt2k, dd->ipath_piosize2k, dd->ipath_pio2kbase, dd->ipath_piobcnt4k, dd->ipath_piosize4k, dd->ipath_pio4kbase, dd->ipath_4kalign); } else ipath_dbg("%u 2k piobufs @ %p\n", dd->ipath_piobcnt2k, dd->ipath_pio2kbase); done: return ret; } /** * init_chip_reset - re-initialize after a reset, or enable * @dd: the infinipath device * * sanity check at least some of the values after reset, and * ensure no receive or transmit (explicitly, in case reset * failed */ static int init_chip_reset(struct ipath_devdata *dd) { u32 rtmp; int i; unsigned long flags; /* * ensure chip does no sends or receives, tail updates, or * pioavail updates while we re-initialize */ dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift); for (i = 0; i < dd->ipath_portcnt; i++) { clear_bit(dd->ipath_r_portenable_shift + i, &dd->ipath_rcvctrl); clear_bit(dd->ipath_r_intravail_shift + i, &dd->ipath_rcvctrl); } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); dd->ipath_sendctrl = 0U; /* no sdma, etc */ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); rtmp = 
ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt); if (rtmp != dd->ipath_rcvtidcnt) dev_info(&dd->pcidev->dev, "tidcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase); if (rtmp != dd->ipath_rcvtidbase) dev_info(&dd->pcidev->dev, "tidbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvtidbase, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); if (rtmp != dd->ipath_rcvegrcnt) dev_info(&dd->pcidev->dev, "egrcnt was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrcnt, rtmp); rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase); if (rtmp != dd->ipath_rcvegrbase) dev_info(&dd->pcidev->dev, "egrbase was %u before " "reset, now %u, using original\n", dd->ipath_rcvegrbase, rtmp); return 0; } static int init_pioavailregs(struct ipath_devdata *dd) { int ret; dd->ipath_pioavailregs_dma = dma_alloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys, GFP_KERNEL); if (!dd->ipath_pioavailregs_dma) { ipath_dev_err(dd, "failed to allocate PIOavail reg area " "in memory\n"); ret = -ENOMEM; goto done; } /* * we really want L2 cache aligned, but for current CPUs of * interest, they are the same. */ dd->ipath_statusp = (u64 *) ((char *)dd->ipath_pioavailregs_dma + ((2 * L1_CACHE_BYTES + dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); /* copy the current value now that it's really allocated */ *dd->ipath_statusp = dd->_ipath_status; /* * setup buffer to hold freeze msg, accessible to apps, * following statusp */ dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1]; /* and its length */ dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]); ret = 0; done: return ret; } /** * init_shadow_tids - allocate the shadow TID array * @dd: the infinipath device * * allocate the shadow TID array, so we can ipath_munlock previous * entries. 
It may make more sense to move the pageshadow to the * port data structure, so we only allocate memory for ports actually * in use, since we at 8k per port, now. */ static void init_shadow_tids(struct ipath_devdata *dd) { struct page **pages; dma_addr_t *addrs; pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(struct page *)); if (!pages) { ipath_dev_err(dd, "failed to allocate shadow page * " "array, no expected sends!\n"); dd->ipath_pageshadow = NULL; return; } addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt * sizeof(dma_addr_t)); if (!addrs) { ipath_dev_err(dd, "failed to allocate shadow dma handle " "array, no expected sends!\n"); vfree(pages); dd->ipath_pageshadow = NULL; return; } dd->ipath_pageshadow = pages; dd->ipath_physshadow = addrs; } static void enable_chip(struct ipath_devdata *dd, int reinit) { u32 val; u64 rcvmask; unsigned long flags; int i; if (!reinit) init_waitqueue_head(&ipath_state_wait); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); /* Enable PIO send, and update of PIOavail regs to memory. */ dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE | INFINIPATH_S_PIOBUFAVAILUPD; /* * Set the PIO avail update threshold to host memory * on chips that support it. */ if (dd->ipath_pioupd_thresh) dd->ipath_sendctrl |= dd->ipath_pioupd_thresh << INFINIPATH_S_UPDTHRESH_SHIFT; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); /* * Enable kernel ports' receive and receive interrupt. * Other ports done as user opens and inits them. 
*/ rcvmask = 1ULL; dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) | (rcvmask << dd->ipath_r_intravail_shift); if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); /* * now ready for use. this should be cleared whenever we * detect a reset, or initiate one. */ dd->ipath_flags |= IPATH_INITTED; /* * Init our shadow copies of head from tail values, * and write head values to match. */ val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0); ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0); /* Initialize so we interrupt on next packet received */ ipath_write_ureg(dd, ur_rcvhdrhead, dd->ipath_rhdrhead_intr_off | dd->ipath_pd[0]->port_head, 0); /* * by now pioavail updates to memory should have occurred, so * copy them into our working/shadow registers; this is in * case something went wrong with abort, but mostly to get the * initial values of the generation bit correct. */ for (i = 0; i < dd->ipath_pioavregs; i++) { __le64 pioavail; /* * Chip Errata bug 6641; even and odd qwords>3 are swapped. */ if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) pioavail = dd->ipath_pioavailregs_dma[i ^ 1]; else pioavail = dd->ipath_pioavailregs_dma[i]; /* * don't need to worry about ipath_pioavailkernel here * because we will call ipath_chg_pioavailkernel() later * in initialization, to busy out buffers as needed */ dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail); } /* can get counters, stats, etc. */ dd->ipath_flags |= IPATH_PRESENT; } static int init_housekeeping(struct ipath_devdata *dd, int reinit) { char boardn[40]; int ret = 0; /* * have to clear shadow copies of registers at init that are * not otherwise set here, or all kinds of bizarre things * happen with driver on chip reset */ dd->ipath_rcvhdrsize = 0; /* * Don't clear ipath_flags as 8bit mode was set before * entering this func. 
However, we do set the linkstate to * unknown, so we can watch for a transition. * PRESENT is set because we want register reads to work, * and the kernel infrastructure saw it in config space; * We clear it if we have failures. */ dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT; dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | IPATH_LINKDOWN | IPATH_LINKINIT); ipath_cdbg(VERBOSE, "Try to read spc chip revision\n"); dd->ipath_revision = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); /* * set up fundamental info we need to use the chip; we assume * if the revision reg and these regs are OK, we don't need to * special case the rest */ dd->ipath_sregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase); dd->ipath_cregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase); dd->ipath_uregbase = ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase); ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, " "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase, dd->ipath_uregbase, dd->ipath_cregbase); if ((dd->ipath_revision & 0xffffffff) == 0xffffffff || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { ipath_dev_err(dd, "Register read failures from chip, " "giving up initialization\n"); dd->ipath_flags &= ~IPATH_PRESENT; ret = -ENODEV; goto done; } /* clear diagctrl register, in case diags were running and crashed */ ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0); /* clear the initial reset flag, in case first driver load */ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, INFINIPATH_E_RESET); ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n", (unsigned long long) dd->ipath_revision, dd->ipath_pcirev); if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) { ipath_dev_err(dd, "Driver only handles version %d, " "chip swversion is %d (%llx), failng\n", 
IPATH_CHIP_SWVERSION, (int)(dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK, (unsigned long long) dd->ipath_revision); ret = -ENOSYS; goto done; } dd->ipath_majrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_CHIPREVMAJOR_SHIFT) & INFINIPATH_R_CHIPREVMAJOR_MASK); dd->ipath_minrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT) & INFINIPATH_R_CHIPREVMINOR_MASK); dd->ipath_boardrev = (u8) ((dd->ipath_revision >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK); ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn); snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion), "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, " "SW Compat %u\n", IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn, (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) & INFINIPATH_R_ARCH_MASK, dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev, (unsigned)(dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) & INFINIPATH_R_SOFTWARE_MASK); ipath_dbg("%s", dd->ipath_boardversion); if (ret) goto done; if (reinit) ret = init_chip_reset(dd); else ret = init_chip_first(dd); done: return ret; } static void verify_interrupt(unsigned long opaque) { struct ipath_devdata *dd = (struct ipath_devdata *) opaque; if (!dd) return; /* being torn down */ /* * If we don't have any interrupts, let the user know and * don't bother checking again. */ if (dd->ipath_int_counter == 0) { if (!dd->ipath_f_intr_fallback(dd)) dev_err(&dd->pcidev->dev, "No interrupts detected, " "not usable.\n"); else /* re-arm the timer to see if fallback works */ mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2); } else ipath_cdbg(VERBOSE, "%u interrupts at timer check\n", dd->ipath_int_counter); } /** * ipath_init_chip - do the actual initialization sequence on the chip * @dd: the infinipath device * @reinit: reinitializing, so don't allocate new memory * * Do the actual initialization sequence on the chip. 
This is done * both from the init routine called from the PCI infrastructure, and * when we reset the chip, or detect that it was reset internally, * or it's administratively re-enabled. * * Memory allocation here and in called routines is only done in * the first case (reinit == 0). We have to be careful, because even * without memory allocation, we need to re-write all the chip registers * TIDs, etc. after the reset or enable has completed. */ int ipath_init_chip(struct ipath_devdata *dd, int reinit) { int ret = 0; u32 kpiobufs, defkbufs; u32 piobufs, uports; u64 val; struct ipath_portdata *pd; gfp_t gfp_flags = GFP_USER | __GFP_COMP; ret = init_housekeeping(dd, reinit); if (ret) goto done; /* * We could bump this to allow for full rcvegrcnt + rcvtidcnt, * but then it no longer nicely fits power of two, and since * we now use routines that backend onto __get_free_pages, the * rest would be wasted. */ dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt, dd->ipath_rcvhdrcnt); /* * Set up the shadow copies of the piobufavail registers, * which we compare against the chip registers for now, and * the in memory DMA'ed copies of the registers. This has to * be done early, before we calculate lastport, etc. */ piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; /* * calc number of pioavail registers, and save it; we have 2 * bits per buffer. */ dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) / (sizeof(u64) * BITS_PER_BYTE / 2); uports = dd->ipath_cfgports ? 
dd->ipath_cfgports - 1 : 0; if (piobufs > 144) defkbufs = 32 + dd->ipath_pioreserved; else defkbufs = 16 + dd->ipath_pioreserved; if (ipath_kpiobufs && (ipath_kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) { int i = (int) piobufs - (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); if (i < 1) i = 1; dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of " "%d for kernel leaves too few for %d user ports " "(%d each); using %u\n", ipath_kpiobufs, piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i); /* * shouldn't change ipath_kpiobufs, because could be * different for different devices... */ kpiobufs = i; } else if (ipath_kpiobufs) kpiobufs = ipath_kpiobufs; else kpiobufs = defkbufs; dd->ipath_lastport_piobuf = piobufs - kpiobufs; dd->ipath_pbufsport = uports ? dd->ipath_lastport_piobuf / uports : 0; /* if not an even divisor, some user ports get extra buffers */ dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports); if (dd->ipath_ports_extrabuf) ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to " "ports <= %u\n", dd->ipath_pbufsport, dd->ipath_ports_extrabuf); dd->ipath_lastpioindex = 0; dd->ipath_lastpioindexl = dd->ipath_piobcnt2k; /* ipath_pioavailshadow initialized earlier */ ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u " "each for %u user ports\n", kpiobufs, piobufs, dd->ipath_pbufsport, uports); ret = dd->ipath_f_early_init(dd); if (ret) { ipath_dev_err(dd, "Early initialization failure\n"); goto done; } /* * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be * done after early_init. 
*/ dd->ipath_hdrqlast = dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize, dd->ipath_rcvhdrentsize); ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize, dd->ipath_rcvhdrsize); if (!reinit) { ret = init_pioavailregs(dd); init_shadow_tids(dd); if (ret) goto done; } ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr, dd->ipath_pioavailregs_phys); /* * this is to detect s/w errors, which the h/w works around by * ignoring the low 6 bits of address, if it wasn't aligned. */ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr); if (val != dd->ipath_pioavailregs_phys) { ipath_dev_err(dd, "Catastrophic software error, " "SendPIOAvailAddr written as %lx, " "read back as %llx\n", (unsigned long) dd->ipath_pioavailregs_phys, (unsigned long long) val); ret = -EINVAL; goto done; } ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP); /* * make sure we are not in freeze, and PIO send enabled, so * writes to pbc happen */ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); /* * before error clears, since we expect serdes pll errors during * this, the first time after reset */ if (bringup_link(dd)) { dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n"); ret = -ENETDOWN; goto done; } /* * clear any "expected" hwerrs from reset and/or initialization * clear any that aren't enabled (at least this once), and then * set the enable mask */ dd->ipath_f_init_hwerrors(dd); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); /* clear all */ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL); /* enable errors that are masked, at least this first time. 
*/ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, ~dd->ipath_maskederrs); dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */ dd->ipath_errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask); /* clear any interrupts up to this point (ints still not enabled) */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL); dd->ipath_f_tidtemplate(dd); /* * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing * re-init, the simplest way to handle this is to free * existing, and re-allocate. * Need to re-create rest of port 0 portdata as well. */ pd = dd->ipath_pd[0]; if (reinit) { struct ipath_portdata *npd; /* * Alloc and init new ipath_portdata for port0, * Then free old pd. Could lead to fragmentation, but also * makes later support for hot-swap easier. */ npd = create_portdata0(dd); if (npd) { ipath_free_pddata(dd, pd); dd->ipath_pd[0] = npd; pd = npd; } else { ipath_dev_err(dd, "Unable to allocate portdata" " for port 0, failing\n"); ret = -ENOMEM; goto done; } } ret = ipath_create_rcvhdrq(dd, pd); if (!ret) ret = create_port0_egr(dd); if (ret) { ipath_dev_err(dd, "failed to allocate kernel port's " "rcvhdrq and/or egr bufs\n"); goto done; } else enable_chip(dd, reinit); /* after enable_chip, so pioavailshadow setup */ ipath_chg_pioavailkernel(dd, 0, piobufs, 1); /* * Cancel any possible active sends from early driver load. * Follows early_init because some chips have to initialize * PIO buffers in early_init to avoid false parity errors. * After enable and ipath_chg_pioavailkernel so we can safely * enable pioavail updates and PIOENABLE; packets are now * ready to go out. */ ipath_cancel_sends(dd, 1); if (!reinit) { /* * Used when we close a port, for DMA already in flight * at close. 
*/ dd->ipath_dummy_hdrq = dma_alloc_coherent( &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size, &dd->ipath_dummy_hdrq_phys, gfp_flags); if (!dd->ipath_dummy_hdrq) { dev_info(&dd->pcidev->dev, "Couldn't allocate 0x%lx bytes for dummy hdrq\n", dd->ipath_pd[0]->port_rcvhdrq_size); /* fallback to just 0'ing */ dd->ipath_dummy_hdrq_phys = 0UL; } } /* * cause retrigger of pending interrupts ignored during init, * even if we had errors */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL); if (!dd->ipath_stats_timer_active) { /* * first init, or after an admin disable/enable * set up stats retrieval timer, even if we had errors * in last portion of setup */ init_timer(&dd->ipath_stats_timer); dd->ipath_stats_timer.function = ipath_get_faststats; dd->ipath_stats_timer.data = (unsigned long) dd; /* every 5 seconds; */ dd->ipath_stats_timer.expires = jiffies + 5 * HZ; /* takes ~16 seconds to overflow at full IB 4x bandwdith */ add_timer(&dd->ipath_stats_timer); dd->ipath_stats_timer_active = 1; } /* Set up SendDMA if chip supports it */ if (dd->ipath_flags & IPATH_HAS_SEND_DMA) ret = setup_sdma(dd); /* Set up HoL state */ init_timer(&dd->ipath_hol_timer); dd->ipath_hol_timer.function = ipath_hol_event; dd->ipath_hol_timer.data = (unsigned long)dd; dd->ipath_hol_state = IPATH_HOL_UP; done: if (!ret) { *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT; if (!dd->ipath_f_intrsetup(dd)) { /* now we can enable all interrupts from the chip */ ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL); /* force re-interrupt of any pending interrupts. 
*/ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL); /* chip is usable; mark it as initialized */ *dd->ipath_statusp |= IPATH_STATUS_INITTED; /* * setup to verify we get an interrupt, and fallback * to an alternate if necessary and possible */ if (!reinit) { init_timer(&dd->ipath_intrchk_timer); dd->ipath_intrchk_timer.function = verify_interrupt; dd->ipath_intrchk_timer.data = (unsigned long) dd; } dd->ipath_intrchk_timer.expires = jiffies + HZ/2; add_timer(&dd->ipath_intrchk_timer); } else ipath_dev_err(dd, "No interrupts enabled, couldn't " "setup interrupt address\n"); if (dd->ipath_cfgports > ipath_stats.sps_nports) /* * sps_nports is a global, so, we set it to * the highest number of ports of any of the * chips we find; we never decrement it, at * least for now. Since this might have changed * over disable/enable or prior to reset, always * do the check and potentially adjust. */ ipath_stats.sps_nports = dd->ipath_cfgports; } else ipath_dbg("Failed (%d) to initialize chip\n", ret); /* if ret is non-zero, we probably should do some cleanup here... */ return ret; } static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp) { struct ipath_devdata *dd; unsigned long flags; unsigned short val; int ret; ret = ipath_parse_ushort(str, &val); spin_lock_irqsave(&ipath_devs_lock, flags); if (ret < 0) goto bail; if (val == 0) { ret = -EINVAL; goto bail; } list_for_each_entry(dd, &ipath_dev_list, ipath_list) { if (dd->ipath_kregbase) continue; if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) { ipath_dev_err( dd, "Allocating %d PIO bufs for kernel leaves " "too few for %d user ports (%d each)\n", val, dd->ipath_cfgports - 1, IPATH_MIN_USER_PORT_BUFCNT); ret = -EINVAL; goto bail; } dd->ipath_lastport_piobuf = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val; } ipath_kpiobufs = val; ret = 0; bail: spin_unlock_irqrestore(&ipath_devs_lock, flags); return ret; }
gpl-2.0
bsmitty83/B-Team4.3
drivers/hwmon/thmc50.c
4334
14381
/* thmc50.c - Part of lm_sensors, Linux kernel modules for hardware monitoring Copyright (C) 2007 Krzysztof Helt <krzysztof.h1@wp.pl> Based on 2.4 driver by Frodo Looijaard <frodol@dds.nl> and Philip Edelbrock <phil@netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> MODULE_LICENSE("GPL"); /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ enum chips { thmc50, adm1022 }; static unsigned short adm1022_temp3[16]; static unsigned int adm1022_temp3_num; module_param_array(adm1022_temp3, ushort, &adm1022_temp3_num, 0); MODULE_PARM_DESC(adm1022_temp3, "List of adapter,address pairs " "to enable 3rd temperature (ADM1022 only)"); /* Many THMC50 constants specified below */ /* The THMC50 registers */ #define THMC50_REG_CONF 0x40 #define THMC50_REG_COMPANY_ID 0x3E #define THMC50_REG_DIE_CODE 0x3F #define THMC50_REG_ANALOG_OUT 0x19 /* * The mirror status register cannot be used as * reading it does not clear alarms. 
*/ #define THMC50_REG_INTR 0x41 static const u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 }; static const u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C }; static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B }; static const u8 THMC50_REG_TEMP_CRITICAL[] = { 0x13, 0x14, 0x14 }; static const u8 THMC50_REG_TEMP_DEFAULT[] = { 0x17, 0x18, 0x18 }; #define THMC50_REG_CONF_nFANOFF 0x20 #define THMC50_REG_CONF_PROGRAMMED 0x08 /* Each client has this additional data */ struct thmc50_data { struct device *hwmon_dev; struct mutex update_lock; enum chips type; unsigned long last_updated; /* In jiffies */ char has_temp3; /* !=0 if it is ADM1022 in temp3 mode */ char valid; /* !=0 if following fields are valid */ /* Register values */ s8 temp_input[3]; s8 temp_max[3]; s8 temp_min[3]; s8 temp_critical[3]; u8 analog_out; u8 alarms; }; static int thmc50_detect(struct i2c_client *client, struct i2c_board_info *info); static int thmc50_probe(struct i2c_client *client, const struct i2c_device_id *id); static int thmc50_remove(struct i2c_client *client); static void thmc50_init_client(struct i2c_client *client); static struct thmc50_data *thmc50_update_device(struct device *dev); static const struct i2c_device_id thmc50_id[] = { { "adm1022", adm1022 }, { "thmc50", thmc50 }, { } }; MODULE_DEVICE_TABLE(i2c, thmc50_id); static struct i2c_driver thmc50_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "thmc50", }, .probe = thmc50_probe, .remove = thmc50_remove, .id_table = thmc50_id, .detect = thmc50_detect, .address_list = normal_i2c, }; static ssize_t show_analog_out(struct device *dev, struct device_attribute *attr, char *buf) { struct thmc50_data *data = thmc50_update_device(dev); return sprintf(buf, "%d\n", data->analog_out); } static ssize_t set_analog_out(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct thmc50_data *data = i2c_get_clientdata(client); int tmp = simple_strtoul(buf, NULL, 
10); int config; mutex_lock(&data->update_lock); data->analog_out = SENSORS_LIMIT(tmp, 0, 255); i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT, data->analog_out); config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); if (data->analog_out == 0) config &= ~THMC50_REG_CONF_nFANOFF; else config |= THMC50_REG_CONF_nFANOFF; i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); mutex_unlock(&data->update_lock); return count; } /* There is only one PWM mode = DC */ static ssize_t show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "0\n"); } /* Temperatures */ static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct thmc50_data *data = thmc50_update_device(dev); return sprintf(buf, "%d\n", data->temp_input[nr] * 1000); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct thmc50_data *data = thmc50_update_device(dev); return sprintf(buf, "%d\n", data->temp_min[nr] * 1000); } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(attr)->index; struct i2c_client *client = to_i2c_client(dev); struct thmc50_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_min[nr] = SENSORS_LIMIT(val / 1000, -128, 127); i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr], data->temp_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct thmc50_data *data = thmc50_update_device(dev); return sprintf(buf, "%d\n", data->temp_max[nr] * 1000); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int 
nr = to_sensor_dev_attr(attr)->index; struct i2c_client *client = to_i2c_client(dev); struct thmc50_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_max[nr] = SENSORS_LIMIT(val / 1000, -128, 127); i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr], data->temp_max[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_critical(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct thmc50_data *data = thmc50_update_device(dev); return sprintf(buf, "%d\n", data->temp_critical[nr] * 1000); } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int index = to_sensor_dev_attr(attr)->index; struct thmc50_data *data = thmc50_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> index) & 1); } #define temp_reg(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \ NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO, \ show_temp_critical, NULL, offset - 1); temp_reg(1); temp_reg(2); temp_reg(3); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_analog_out, set_analog_out, 0); static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0); static struct attribute *thmc50_attributes[] = { &sensor_dev_attr_temp1_max.dev_attr.attr, 
&sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm1_mode.dev_attr.attr, NULL }; static const struct attribute_group thmc50_group = { .attrs = thmc50_attributes, }; /* for ADM1022 3rd temperature mode */ static struct attribute *temp3_attributes[] = { &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_crit.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, NULL }; static const struct attribute_group temp3_group = { .attrs = temp3_attributes, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int thmc50_detect(struct i2c_client *client, struct i2c_board_info *info) { unsigned company; unsigned revision; unsigned config; struct i2c_adapter *adapter = client->adapter; const char *type_name; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { pr_debug("thmc50: detect failed, " "smbus byte data not supported!\n"); return -ENODEV; } pr_debug("thmc50: Probing for THMC50 at 0x%2X on bus %d\n", client->addr, i2c_adapter_id(client->adapter)); company = i2c_smbus_read_byte_data(client, THMC50_REG_COMPANY_ID); revision = i2c_smbus_read_byte_data(client, THMC50_REG_DIE_CODE); config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); if (revision < 0xc0 || (config & 0x10)) return -ENODEV; if (company == 0x41) { int id = i2c_adapter_id(client->adapter); int i; type_name = "adm1022"; for (i = 0; i + 1 < adm1022_temp3_num; i += 2) if (adm1022_temp3[i] == id && 
adm1022_temp3[i + 1] == client->addr) { /* enable 2nd remote temp */ config |= (1 << 7); i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); break; } } else if (company == 0x49) { type_name = "thmc50"; } else { pr_debug("thmc50: Detection of THMC50/ADM1022 failed\n"); return -ENODEV; } pr_debug("thmc50: Detected %s (version %x, revision %x)\n", type_name, (revision >> 4) - 0xc, revision & 0xf); strlcpy(info->type, type_name, I2C_NAME_SIZE); return 0; } static int thmc50_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct thmc50_data *data; int err; data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL); if (!data) { pr_debug("thmc50: detect failed, kzalloc failed!\n"); err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); data->type = id->driver_data; mutex_init(&data->update_lock); thmc50_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &thmc50_group))) goto exit_free; /* Register ADM1022 sysfs hooks */ if (data->has_temp3) if ((err = sysfs_create_group(&client->dev.kobj, &temp3_group))) goto exit_remove_sysfs_thmc50; /* Register a new directory entry with module sensors */ data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_sysfs; } return 0; exit_remove_sysfs: if (data->has_temp3) sysfs_remove_group(&client->dev.kobj, &temp3_group); exit_remove_sysfs_thmc50: sysfs_remove_group(&client->dev.kobj, &thmc50_group); exit_free: kfree(data); exit: return err; } static int thmc50_remove(struct i2c_client *client) { struct thmc50_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &thmc50_group); if (data->has_temp3) sysfs_remove_group(&client->dev.kobj, &temp3_group); kfree(data); return 0; } static void thmc50_init_client(struct i2c_client *client) { struct thmc50_data *data = i2c_get_clientdata(client); int config; data->analog_out = 
i2c_smbus_read_byte_data(client, THMC50_REG_ANALOG_OUT); /* set up to at least 1 */ if (data->analog_out == 0) { data->analog_out = 1; i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT, data->analog_out); } config = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); config |= 0x1; /* start the chip if it is in standby mode */ if (data->type == adm1022 && (config & (1 << 7))) data->has_temp3 = 1; i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config); } static struct thmc50_data *thmc50_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct thmc50_data *data = i2c_get_clientdata(client); int timeout = HZ / 5 + (data->type == thmc50 ? HZ : 0); mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + timeout) || !data->valid) { int temps = data->has_temp3 ? 3 : 2; int i; int prog = i2c_smbus_read_byte_data(client, THMC50_REG_CONF); prog &= THMC50_REG_CONF_PROGRAMMED; for (i = 0; i < temps; i++) { data->temp_input[i] = i2c_smbus_read_byte_data(client, THMC50_REG_TEMP[i]); data->temp_max[i] = i2c_smbus_read_byte_data(client, THMC50_REG_TEMP_MAX[i]); data->temp_min[i] = i2c_smbus_read_byte_data(client, THMC50_REG_TEMP_MIN[i]); data->temp_critical[i] = i2c_smbus_read_byte_data(client, prog ? THMC50_REG_TEMP_CRITICAL[i] : THMC50_REG_TEMP_DEFAULT[i]); } data->analog_out = i2c_smbus_read_byte_data(client, THMC50_REG_ANALOG_OUT); data->alarms = i2c_smbus_read_byte_data(client, THMC50_REG_INTR); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __init sm_thmc50_init(void) { return i2c_add_driver(&thmc50_driver); } static void __exit sm_thmc50_exit(void) { i2c_del_driver(&thmc50_driver); } MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>"); MODULE_DESCRIPTION("THMC50 driver"); module_init(sm_thmc50_init); module_exit(sm_thmc50_exit);
gpl-2.0
MoKee/android_kernel_htc_villec2
arch/sh/mm/fault_64.c
4590
7804
/* * The SH64 TLB miss. * * Original code from fault.c * Copyright (C) 2000, 2001 Paolo Alberelli * * Fast PTE->TLB refill path * Copyright (C) 2003 Richard.Curnow@superh.com * * IMPORTANT NOTES : * The do_fast_page_fault function is called from a context in entry.S * where very few registers have been saved. In particular, the code in * this file must be compiled not to use ANY caller-save registers that * are not part of the restricted save set. Also, it means that code in * this file must not make calls to functions elsewhere in the kernel, or * else the excepting context will see corruption in its caller-save * registers. Plus, the entry.S save area is non-reentrant, so this code * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic * on any exception. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <asm/system.h> #include <asm/tlb.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <cpu/registers.h> /* Callable from fault.c, so not static */ inline void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte) { unsigned long long ptel; unsigned long long pteh=0; struct tlb_info *tlbp; unsigned long long next; /* Get PTEL first */ ptel = pte_val(*pte); /* * Set PTEH register */ pteh = neff_sign_extend(address & MMU_VPN_MASK); /* Set the ASID. 
*/ pteh |= get_asid() << PTEH_ASID_SHIFT; pteh |= PTEH_VALID; /* Set PTEL register, set_pte has performed the sign extension */ ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb); next = tlbp->next; __flush_tlb_slot(next); asm volatile ("putcfg %0,1,%2\n\n\t" "putcfg %0,0,%1\n" : : "r" (next), "r" (pteh), "r" (ptel) ); next += TLB_STEP; if (next > tlbp->last) next = tlbp->first; tlbp->next = next; } static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags, unsigned long long textaccess, unsigned long address) { pgd_t *dir; pud_t *pud; pmd_t *pmd; static pte_t *pte; pte_t entry; dir = pgd_offset_k(address); pud = pud_offset(dir, address); if (pud_none_or_clear_bad(pud)) return 0; pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) return 0; pte = pte_offset_kernel(pmd, address); entry = *pte; if (pte_none(entry) || !pte_present(entry)) return 0; if ((pte_val(entry) & protection_flags) != protection_flags) return 0; __do_tlb_refill(address, textaccess, pte); return 1; } static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags, unsigned long long textaccess, unsigned long address) { pgd_t *dir; pud_t *pud; pmd_t *pmd; pte_t *pte; pte_t entry; /* NB. The PGD currently only contains a single entry - there is no page table tree stored for the top half of the address space since virtual pages in that region should never be mapped in user mode. (In kernel mode, the only things in that region are the 512Mb super page (locked in), and vmalloc (modules) + I/O device pages (handled by handle_vmalloc_fault), so no PGD for the upper half is required by kernel mode either). See how mm->pgd is allocated and initialised in pgd_alloc to see why the next test is necessary. - RPC */ if (address >= (unsigned long) TASK_SIZE) /* upper half - never has page table entries. 
*/ return 0; dir = pgd_offset(mm, address); if (pgd_none(*dir) || !pgd_present(*dir)) return 0; if (!pgd_present(*dir)) return 0; pud = pud_offset(dir, address); if (pud_none(*pud) || !pud_present(*pud)) return 0; pmd = pmd_offset(pud, address); if (pmd_none(*pmd) || !pmd_present(*pmd)) return 0; pte = pte_offset_kernel(pmd, address); entry = *pte; if (pte_none(entry) || !pte_present(entry)) return 0; /* * If the page doesn't have sufficient protection bits set to * service the kind of fault being handled, there's not much * point doing the TLB refill. Punt the fault to the general * handler. */ if ((pte_val(entry) & protection_flags) != protection_flags) return 0; __do_tlb_refill(address, textaccess, pte); return 1; } /* * Put all this information into one structure so that everything is just * arithmetic relative to a single base address. This reduces the number * of movi/shori pairs needed just to load addresses of static data. */ struct expevt_lookup { unsigned short protection_flags[8]; unsigned char is_text_access[8]; unsigned char is_write_access[8]; }; #define PRU (1<<9) #define PRW (1<<8) #define PRX (1<<7) #define PRR (1<<6) #define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED) #define YOUNG (_PAGE_ACCESSED) /* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether the fault happened in user mode or privileged mode. */ static struct expevt_lookup expevt_lookup_table = { .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW}, .is_text_access = {1, 1, 0, 0, 0, 0, 0, 0} }; /* This routine handles page faults that can be serviced just by refilling a TLB entry from an existing page table entry. (This case represents a very large majority of page faults.) Return 1 if the fault was successfully handled. Return 0 if the fault could not be handled. 
(This leads into the general fault handling in fault.c which deals with mapping file-backed pages, stack growth, segmentation faults, swapping etc etc) */ asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt, unsigned long address) { struct task_struct *tsk; struct mm_struct *mm; unsigned long long textaccess; unsigned long long protection_flags; unsigned long long index; unsigned long long expevt4; /* The next few lines implement a way of hashing EXPEVT into a * small array index which can be used to lookup parameters * specific to the type of TLBMISS being handled. * * Note: * ITLBMISS has EXPEVT==0xa40 * RTLBMISS has EXPEVT==0x040 * WTLBMISS has EXPEVT==0x060 */ expevt4 = (expevt >> 4); /* TODO : xor ssr_md into this expression too. Then we can check * that PRU is set when it needs to be. */ index = expevt4 ^ (expevt4 >> 5); index &= 7; protection_flags = expevt_lookup_table.protection_flags[index]; textaccess = expevt_lookup_table.is_text_access[index]; /* SIM * Note this is now called with interrupts still disabled * This is to cope with being called for a missing IO port * address with interrupts disabled. This should be fixed as * soon as we have a better 'fast path' miss handler. * * Plus take care how you try and debug this stuff. * For example, writing debug data to a port which you * have just faulted on is not going to work. */ tsk = current; mm = tsk->mm; if ((address >= VMALLOC_START && address < VMALLOC_END) || (address >= IOBASE_VADDR && address < IOBASE_END)) { if (ssr_md) /* * Process-contexts can never have this address * range mapped */ if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) return 1; } else if (!in_interrupt() && mm) { if (handle_tlbmiss(mm, protection_flags, textaccess, address)) return 1; } return 0; }
gpl-2.0
lyfkevin/Wind_iproj_JB_kernel
arch/arm/mach-omap1/dma.c
4846
8656
/* * OMAP1/OMAP7xx - specific DMA driver * * Copyright (C) 2003 - 2008 Nokia Corporation * Author: Juha Yrjölä <juha.yrjola@nokia.com> * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> * Graphics DMA and LCD DMA graphics tranformations * by Imre Deak <imre.deak@nokia.com> * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ * Converted DMA library into platform driver * - G, Manjunath Kondaiah <manjugk@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/io.h> #include <plat/dma.h> #include <plat/tc.h> #include <plat/irqs.h> #define OMAP1_DMA_BASE (0xfffed800) #define OMAP1_LOGICAL_DMA_CH_COUNT 17 #define OMAP1_DMA_STRIDE 0x40 static u32 errata; static u32 enable_1510_mode; static u8 dma_stride; static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end; static u16 reg_map[] = { [GCR] = 0x400, [GSCR] = 0x404, [GRST1] = 0x408, [HW_ID] = 0x442, [PCH2_ID] = 0x444, [PCH0_ID] = 0x446, [PCH1_ID] = 0x448, [PCHG_ID] = 0x44a, [PCHD_ID] = 0x44c, [CAPS_0] = 0x44e, [CAPS_1] = 0x452, [CAPS_2] = 0x456, [CAPS_3] = 0x458, [CAPS_4] = 0x45a, [PCH2_SR] = 0x460, [PCH0_SR] = 0x480, [PCH1_SR] = 0x482, [PCHD_SR] = 0x4c0, /* Common Registers */ [CSDP] = 0x00, [CCR] = 0x02, [CICR] = 0x04, [CSR] = 0x06, [CEN] = 0x10, [CFN] = 0x12, [CSFI] = 0x14, [CSEI] = 0x16, [CPC] = 0x18, /* 15xx only */ [CSAC] = 0x18, [CDAC] = 0x1a, [CDEI] = 0x1c, [CDFI] = 0x1e, [CLNK_CTRL] = 0x28, /* Channel specific register offsets */ [CSSA] = 0x08, [CDSA] = 0x0c, [COLOR] = 0x20, [CCR2] = 0x24, [LCH_CTRL] = 0x2a, }; static struct resource res[] 
__initdata = { [0] = { .start = OMAP1_DMA_BASE, .end = OMAP1_DMA_BASE + SZ_2K - 1, .flags = IORESOURCE_MEM, }, [1] = { .name = "0", .start = INT_DMA_CH0_6, .flags = IORESOURCE_IRQ, }, [2] = { .name = "1", .start = INT_DMA_CH1_7, .flags = IORESOURCE_IRQ, }, [3] = { .name = "2", .start = INT_DMA_CH2_8, .flags = IORESOURCE_IRQ, }, [4] = { .name = "3", .start = INT_DMA_CH3, .flags = IORESOURCE_IRQ, }, [5] = { .name = "4", .start = INT_DMA_CH4, .flags = IORESOURCE_IRQ, }, [6] = { .name = "5", .start = INT_DMA_CH5, .flags = IORESOURCE_IRQ, }, /* Handled in lcd_dma.c */ [7] = { .name = "6", .start = INT_1610_DMA_CH6, .flags = IORESOURCE_IRQ, }, /* irq's for omap16xx and omap7xx */ [8] = { .name = "7", .start = INT_1610_DMA_CH7, .flags = IORESOURCE_IRQ, }, [9] = { .name = "8", .start = INT_1610_DMA_CH8, .flags = IORESOURCE_IRQ, }, [10] = { .name = "9", .start = INT_1610_DMA_CH9, .flags = IORESOURCE_IRQ, }, [11] = { .name = "10", .start = INT_1610_DMA_CH10, .flags = IORESOURCE_IRQ, }, [12] = { .name = "11", .start = INT_1610_DMA_CH11, .flags = IORESOURCE_IRQ, }, [13] = { .name = "12", .start = INT_1610_DMA_CH12, .flags = IORESOURCE_IRQ, }, [14] = { .name = "13", .start = INT_1610_DMA_CH13, .flags = IORESOURCE_IRQ, }, [15] = { .name = "14", .start = INT_1610_DMA_CH14, .flags = IORESOURCE_IRQ, }, [16] = { .name = "15", .start = INT_1610_DMA_CH15, .flags = IORESOURCE_IRQ, }, [17] = { .name = "16", .start = INT_DMA_LCD, .flags = IORESOURCE_IRQ, }, }; static void __iomem *dma_base; static inline void dma_write(u32 val, int reg, int lch) { u8 stride; u32 offset; stride = (reg >= dma_common_ch_start) ? dma_stride : 0; offset = reg_map[reg] + (stride * lch); __raw_writew(val, dma_base + offset); if ((reg > CLNK_CTRL && reg < CCEN) || (reg > PCHD_ID && reg < CAPS_2)) { u32 offset2 = reg_map[reg] + 2 + (stride * lch); __raw_writew(val >> 16, dma_base + offset2); } } static inline u32 dma_read(int reg, int lch) { u8 stride; u32 offset, val; stride = (reg >= dma_common_ch_start) ? 
dma_stride : 0; offset = reg_map[reg] + (stride * lch); val = __raw_readw(dma_base + offset); if ((reg > CLNK_CTRL && reg < CCEN) || (reg > PCHD_ID && reg < CAPS_2)) { u16 upper; u32 offset2 = reg_map[reg] + 2 + (stride * lch); upper = __raw_readw(dma_base + offset2); val |= (upper << 16); } return val; } static void omap1_clear_lch_regs(int lch) { int i = dma_common_ch_start; for (; i <= dma_common_ch_end; i += 1) dma_write(0, i, lch); } static void omap1_clear_dma(int lch) { u32 l; l = dma_read(CCR, lch); l &= ~OMAP_DMA_CCR_EN; dma_write(l, CCR, lch); /* Clear pending interrupts */ l = dma_read(CSR, lch); } static void omap1_show_dma_caps(void) { if (enable_1510_mode) { printk(KERN_INFO "DMA support for OMAP15xx initialized\n"); } else { u16 w; printk(KERN_INFO "OMAP DMA hardware version %d\n", dma_read(HW_ID, 0)); printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n", dma_read(CAPS_0, 0), dma_read(CAPS_1, 0), dma_read(CAPS_2, 0), dma_read(CAPS_3, 0), dma_read(CAPS_4, 0)); /* Disable OMAP 3.0/3.1 compatibility mode. */ w = dma_read(GSCR, 0); w |= 1 << 3; dma_write(w, GSCR, 0); } return; } static u32 configure_dma_errata(void) { /* * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is * read before the DMA controller finished disabling the channel. 
*/ if (!cpu_is_omap15xx()) SET_DMA_ERRATA(DMA_ERRATA_3_3); return errata; } static int __init omap1_system_dma_init(void) { struct omap_system_dma_plat_info *p; struct omap_dma_dev_attr *d; struct platform_device *pdev; int ret; pdev = platform_device_alloc("omap_dma_system", 0); if (!pdev) { pr_err("%s: Unable to device alloc for dma\n", __func__); return -ENOMEM; } dma_base = ioremap(res[0].start, resource_size(&res[0])); if (!dma_base) { pr_err("%s: Unable to ioremap\n", __func__); ret = -ENODEV; goto exit_device_put; } ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_device_put; } p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); if (!p) { dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", __func__, pdev->name); ret = -ENOMEM; goto exit_device_del; } d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); if (!d) { dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n", __func__, pdev->name); ret = -ENOMEM; goto exit_release_p; } d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; /* Valid attributes for omap1 plus processors */ if (cpu_is_omap15xx()) d->dev_caps = ENABLE_1510_MODE; enable_1510_mode = d->dev_caps & ENABLE_1510_MODE; d->dev_caps |= SRC_PORT; d->dev_caps |= DST_PORT; d->dev_caps |= SRC_INDEX; d->dev_caps |= DST_INDEX; d->dev_caps |= IS_BURST_ONLY4; d->dev_caps |= CLEAR_CSR_ON_READ; d->dev_caps |= IS_WORD_16; d->chan = kzalloc(sizeof(struct omap_dma_lch) * (d->lch_count), GFP_KERNEL); if (!d->chan) { dev_err(&pdev->dev, "%s: Memory allocation failed" "for d->chan!!!\n", __func__); goto exit_release_d; } if (cpu_is_omap15xx()) d->chan_count = 9; else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { if (!(d->dev_caps & ENABLE_1510_MODE)) d->chan_count = 16; else d->chan_count = 9; } p->dma_attr = d; p->show_dma_caps = omap1_show_dma_caps; p->clear_lch_regs = omap1_clear_lch_regs; p->clear_dma = 
omap1_clear_dma; p->dma_write = dma_write; p->dma_read = dma_read; p->disable_irq_lch = NULL; p->errata = configure_dma_errata(); ret = platform_device_add_data(pdev, p, sizeof(*p)); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_release_chan; } ret = platform_device_add(pdev); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); goto exit_release_chan; } dma_stride = OMAP1_DMA_STRIDE; dma_common_ch_start = CPC; dma_common_ch_end = COLOR; return ret; exit_release_chan: kfree(d->chan); exit_release_d: kfree(d); exit_release_p: kfree(p); exit_device_del: platform_device_del(pdev); exit_device_put: platform_device_put(pdev); return ret; } arch_initcall(omap1_system_dma_init);
gpl-2.0
Endika/linux
arch/mips/txx9/generic/irq_tx4939.c
4846
5512
/* * TX4939 irq routines * Based on linux/arch/mips/kernel/irq_txx9.c, * and RBTX49xx patch from CELF patch archive. * * Copyright 2001, 2003-2005 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ahennessy@mvista.com * source@mvista.com * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ /* * TX4939 defines 64 IRQs. * Similer to irq_txx9.c but different register layouts. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/types.h> #include <asm/irq_cpu.h> #include <asm/txx9irq.h> #include <asm/txx9/tx4939.h> /* IRCER : Int. Control Enable */ #define TXx9_IRCER_ICE 0x00000001 /* IRCR : Int. Control */ #define TXx9_IRCR_LOW 0x00000000 #define TXx9_IRCR_HIGH 0x00000001 #define TXx9_IRCR_DOWN 0x00000002 #define TXx9_IRCR_UP 0x00000003 #define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002) /* IRSCR : Int. Status Control */ #define TXx9_IRSCR_EIClrE 0x00000100 #define TXx9_IRSCR_EIClr_MASK 0x0000000f /* IRCSR : Int. 
Current Status */ #define TXx9_IRCSR_IF 0x00010000 #define irc_dlevel 0 #define irc_elevel 1 static struct { unsigned char level; unsigned char mode; } tx4939irq[TX4939_NUM_IR] __read_mostly; static void tx4939_irq_unmask(struct irq_data *d) { unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; u32 __iomem *lvlp; int ofs; if (irq_nr < 32) { irq_nr--; lvlp = &tx4939_ircptr->lvl[(irq_nr % 16) / 2].r; } else { irq_nr -= 32; lvlp = &tx4939_ircptr->lvl[8 + (irq_nr % 16) / 2].r; } ofs = (irq_nr & 16) + (irq_nr & 1) * 8; __raw_writel((__raw_readl(lvlp) & ~(0xff << ofs)) | (tx4939irq[irq_nr].level << ofs), lvlp); } static inline void tx4939_irq_mask(struct irq_data *d) { unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; u32 __iomem *lvlp; int ofs; if (irq_nr < 32) { irq_nr--; lvlp = &tx4939_ircptr->lvl[(irq_nr % 16) / 2].r; } else { irq_nr -= 32; lvlp = &tx4939_ircptr->lvl[8 + (irq_nr % 16) / 2].r; } ofs = (irq_nr & 16) + (irq_nr & 1) * 8; __raw_writel((__raw_readl(lvlp) & ~(0xff << ofs)) | (irc_dlevel << ofs), lvlp); mmiowb(); } static void tx4939_irq_mask_ack(struct irq_data *d) { unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; tx4939_irq_mask(d); if (TXx9_IRCR_EDGE(tx4939irq[irq_nr].mode)) { irq_nr--; /* clear edge detection */ __raw_writel((TXx9_IRSCR_EIClrE | (irq_nr & 0xf)) << (irq_nr & 0x10), &tx4939_ircptr->edc.r); } } static int tx4939_irq_set_type(struct irq_data *d, unsigned int flow_type) { unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; u32 cr; u32 __iomem *crp; int ofs; int mode; if (flow_type & IRQF_TRIGGER_PROBE) return 0; switch (flow_type & IRQF_TRIGGER_MASK) { case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break; case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break; case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break; case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break; default: return -EINVAL; } if (irq_nr < 32) { irq_nr--; crp = &tx4939_ircptr->dm[(irq_nr & 8) >> 3].r; } else { irq_nr -= 32; crp = &tx4939_ircptr->dm2[((irq_nr & 8) >> 3)].r; } ofs = (((irq_nr & 
16) >> 1) | (irq_nr & (8 - 1))) * 2; cr = __raw_readl(crp); cr &= ~(0x3 << ofs); cr |= (mode & 0x3) << ofs; __raw_writel(cr, crp); tx4939irq[irq_nr].mode = mode; return 0; } static struct irq_chip tx4939_irq_chip = { .name = "TX4939", .irq_ack = tx4939_irq_mask_ack, .irq_mask = tx4939_irq_mask, .irq_mask_ack = tx4939_irq_mask_ack, .irq_unmask = tx4939_irq_unmask, .irq_set_type = tx4939_irq_set_type, }; static int tx4939_irq_set_pri(int irc_irq, int new_pri) { int old_pri; if ((unsigned int)irc_irq >= TX4939_NUM_IR) return 0; old_pri = tx4939irq[irc_irq].level; tx4939irq[irc_irq].level = new_pri; return old_pri; } void __init tx4939_irq_init(void) { int i; mips_cpu_irq_init(); /* disable interrupt control */ __raw_writel(0, &tx4939_ircptr->den.r); __raw_writel(0, &tx4939_ircptr->maskint.r); __raw_writel(0, &tx4939_ircptr->maskext.r); /* irq_base + 0 is not used */ for (i = 1; i < TX4939_NUM_IR; i++) { tx4939irq[i].level = 4; /* middle level */ tx4939irq[i].mode = TXx9_IRCR_LOW; irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &tx4939_irq_chip, handle_level_irq); } /* mask all IRC interrupts */ __raw_writel(0, &tx4939_ircptr->msk.r); for (i = 0; i < 16; i++) __raw_writel(0, &tx4939_ircptr->lvl[i].r); /* setup IRC interrupt mode (Low Active) */ for (i = 0; i < 2; i++) __raw_writel(0, &tx4939_ircptr->dm[i].r); for (i = 0; i < 2; i++) __raw_writel(0, &tx4939_ircptr->dm2[i].r); /* enable interrupt control */ __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r); __raw_writel(irc_elevel, &tx4939_ircptr->msk.r); irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT, handle_simple_irq); /* raise priority for errors, timers, sio */ tx4939_irq_set_pri(TX4939_IR_WTOERR, 7); tx4939_irq_set_pri(TX4939_IR_PCIERR, 7); tx4939_irq_set_pri(TX4939_IR_PCIPME, 7); for (i = 0; i < TX4939_NUM_IR_TMR; i++) tx4939_irq_set_pri(TX4939_IR_TMR(i), 6); for (i = 0; i < TX4939_NUM_IR_SIO; i++) tx4939_irq_set_pri(TX4939_IR_SIO(i), 5); } int tx4939_irq(void) { u32 csr = 
__raw_readl(&tx4939_ircptr->cs.r); if (likely(!(csr & TXx9_IRCSR_IF))) return TXX9_IRQ_BASE + (csr & (TX4939_NUM_IR - 1)); return -1; }
gpl-2.0
vwmofo/android_kernel_htc_msm8960
arch/powerpc/boot/cuboot-83xx.c
14062
1525
/* * Old U-boot compatibility for 83xx * * Author: Scott Wood <scottwood@freescale.com> * * Copyright (c) 2007 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #define TARGET_83xx #include "ppcboot.h" static bd_t bd; static void platform_fixups(void) { void *soc; dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq); /* Unfortunately, the specific model number is encoded in the * soc node name in existing dts files -- once that is fixed, * this can do a simple path lookup. */ soc = find_node_by_devtype(NULL, "soc"); if (soc) { void *serial = NULL; setprop(soc, "bus-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); while ((serial = find_node_by_devtype(serial, "serial"))) { if (get_parent(serial) != soc) continue; setprop(serial, "clock-frequency", &bd.bi_busfreq, sizeof(bd.bi_busfreq)); } } } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); serial_console_init(); platform_ops.fixups = platform_fixups; }
gpl-2.0
EAVR/EV3.14
ev3sources/extra/linux-03.20.00.13/drivers/char/tpm/tpm_infineon.c
239
16495
/* * Description: * Device Driver for the Infineon Technologies * SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module * Specifications at www.trustedcomputinggroup.org * * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com> * Sirrix AG - security technologies, http://www.sirrix.com and * Applied Data Security Group, Ruhr-University Bochum, Germany * Project-Homepage: http://www.prosec.rub.de/tpm * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/init.h> #include <linux/pnp.h> #include "tpm.h" /* Infineon specific definitions */ /* maximum number of WTX-packages */ #define TPM_MAX_WTX_PACKAGES 50 /* msleep-Time for WTX-packages */ #define TPM_WTX_MSLEEP_TIME 20 /* msleep-Time --> Interval to check status register */ #define TPM_MSLEEP_TIME 3 /* gives number of max. msleep()-calls before throwing timeout */ #define TPM_MAX_TRIES 5000 #define TPM_INFINEON_DEV_VEN_VALUE 0x15D1 #define TPM_INF_IO_PORT 0x0 #define TPM_INF_IO_MEM 0x1 #define TPM_INF_ADDR 0x0 #define TPM_INF_DATA 0x1 struct tpm_inf_dev { int iotype; void __iomem *mem_base; /* MMIO ioremap'd addr */ unsigned long map_base; /* phys MMIO base */ unsigned long map_size; /* MMIO region size */ unsigned int index_off; /* index register offset */ unsigned int data_regs; /* Data registers */ unsigned int data_size; unsigned int config_port; /* IO Port config index reg */ unsigned int config_size; }; static struct tpm_inf_dev tpm_dev; static inline void tpm_data_out(unsigned char data, unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.data_regs + offset); else writeb(data, tpm_dev.mem_base + tpm_dev.data_regs + offset); } static inline unsigned char tpm_data_in(unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.data_regs + offset); else return readb(tpm_dev.mem_base + 
tpm_dev.data_regs + offset); } static inline void tpm_config_out(unsigned char data, unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) outb(data, tpm_dev.config_port + offset); else writeb(data, tpm_dev.mem_base + tpm_dev.index_off + offset); } static inline unsigned char tpm_config_in(unsigned char offset) { if (tpm_dev.iotype == TPM_INF_IO_PORT) return inb(tpm_dev.config_port + offset); else return readb(tpm_dev.mem_base + tpm_dev.index_off + offset); } /* TPM header definitions */ enum infineon_tpm_header { TPM_VL_VER = 0x01, TPM_VL_CHANNEL_CONTROL = 0x07, TPM_VL_CHANNEL_PERSONALISATION = 0x0A, TPM_VL_CHANNEL_TPM = 0x0B, TPM_VL_CONTROL = 0x00, TPM_INF_NAK = 0x15, TPM_CTRL_WTX = 0x10, TPM_CTRL_WTX_ABORT = 0x18, TPM_CTRL_WTX_ABORT_ACK = 0x18, TPM_CTRL_ERROR = 0x20, TPM_CTRL_CHAININGACK = 0x40, TPM_CTRL_CHAINING = 0x80, TPM_CTRL_DATA = 0x04, TPM_CTRL_DATA_CHA = 0x84, TPM_CTRL_DATA_CHA_ACK = 0xC4 }; enum infineon_tpm_register { WRFIFO = 0x00, RDFIFO = 0x01, STAT = 0x02, CMD = 0x03 }; enum infineon_tpm_command_bits { CMD_DIS = 0x00, CMD_LP = 0x01, CMD_RES = 0x02, CMD_IRQC = 0x06 }; enum infineon_tpm_status_bits { STAT_XFE = 0x00, STAT_LPA = 0x01, STAT_FOK = 0x02, STAT_TOK = 0x03, STAT_IRQA = 0x06, STAT_RDA = 0x07 }; /* some outgoing values */ enum infineon_tpm_values { CHIP_ID1 = 0x20, CHIP_ID2 = 0x21, TPM_DAR = 0x30, RESET_LP_IRQC_DISABLE = 0x41, ENABLE_REGISTER_PAIR = 0x55, IOLIMH = 0x60, IOLIML = 0x61, DISABLE_REGISTER_PAIR = 0xAA, IDVENL = 0xF1, IDVENH = 0xF2, IDPDL = 0xF3, IDPDH = 0xF4 }; static int number_of_wtx; static int empty_fifo(struct tpm_chip *chip, int clear_wrfifo) { int status; int check = 0; int i; if (clear_wrfifo) { for (i = 0; i < 4096; i++) { status = tpm_data_in(WRFIFO); if (status == 0xff) { if (check == 5) break; else check++; } } } /* Note: The values which are currently in the FIFO of the TPM are thrown away since there is no usage for them. 
Usually, this has nothing to say, since the TPM will give its answer immediately or will be aborted anyway, so the data here is usually garbage and useless. We have to clean this, because the next communication with the TPM would be rubbish, if there is still some old data in the Read FIFO. */ i = 0; do { status = tpm_data_in(RDFIFO); status = tpm_data_in(STAT); i++; if (i == TPM_MAX_TRIES) return -EIO; } while ((status & (1 << STAT_RDA)) != 0); return 0; } static int wait(struct tpm_chip *chip, int wait_for_bit) { int status; int i; for (i = 0; i < TPM_MAX_TRIES; i++) { status = tpm_data_in(STAT); /* check the status-register if wait_for_bit is set */ if (status & 1 << wait_for_bit) break; msleep(TPM_MSLEEP_TIME); } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) dev_err(chip->dev, "Timeout in wait(STAT_XFE)\n"); if (wait_for_bit == STAT_RDA) dev_err(chip->dev, "Timeout in wait(STAT_RDA)\n"); return -EIO; } return 0; }; static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) { wait(chip, STAT_XFE); tpm_data_out(sendbyte, WRFIFO); } /* Note: WTX means Waiting-Time-Extension. Whenever the TPM needs more calculation time, it sends a WTX-package, which has to be acknowledged or aborted. This usually occurs if you are hammering the TPM with key creation. Set the maximum number of WTX-packages in the definitions above, if the number is reached, the waiting-time will be denied and the TPM command has to be resend. 
*/ static void tpm_wtx(struct tpm_chip *chip) { number_of_wtx++; dev_info(chip->dev, "Granting WTX (%02d / %02d)\n", number_of_wtx, TPM_MAX_WTX_PACKAGES); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX); wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); msleep(TPM_WTX_MSLEEP_TIME); } static void tpm_wtx_abort(struct tpm_chip *chip) { dev_info(chip->dev, "Aborting WTX\n"); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX_ABORT); wait_and_send(chip, 0x00); wait_and_send(chip, 0x00); number_of_wtx = 0; msleep(TPM_WTX_MSLEEP_TIME); } static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) { int i; int ret; u32 size = 0; number_of_wtx = 0; recv_begin: /* start receiving header */ for (i = 0; i < 4; i++) { ret = wait(chip, STAT_RDA); if (ret) return -EIO; buf[i] = tpm_data_in(RDFIFO); } if (buf[0] != TPM_VL_VER) { dev_err(chip->dev, "Wrong transport protocol implementation!\n"); return -EIO; } if (buf[1] == TPM_CTRL_DATA) { /* size of the data received */ size = ((buf[2] << 8) | buf[3]); for (i = 0; i < size; i++) { wait(chip, STAT_RDA); buf[i] = tpm_data_in(RDFIFO); } if ((size == 0x6D00) && (buf[1] == 0x80)) { dev_err(chip->dev, "Error handling on vendor layer!\n"); return -EIO; } for (i = 0; i < size; i++) buf[i] = buf[i + 6]; size = size - 6; return size; } if (buf[1] == TPM_CTRL_WTX) { dev_info(chip->dev, "WTX-package received\n"); if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { tpm_wtx(chip); goto recv_begin; } else { tpm_wtx_abort(chip); goto recv_begin; } } if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { dev_info(chip->dev, "WTX-abort acknowledged\n"); return size; } if (buf[1] == TPM_CTRL_ERROR) { dev_err(chip->dev, "ERROR-package received:\n"); if (buf[4] == TPM_INF_NAK) dev_err(chip->dev, "-> Negative acknowledgement" " - retransmit command!\n"); return -EIO; } return -EIO; } static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) { int i; int ret; u8 count_high, count_low, count_4, count_3, count_2, 
count_1; /* Disabling Reset, LP and IRQC */ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); ret = empty_fifo(chip, 1); if (ret) { dev_err(chip->dev, "Timeout while clearing FIFO\n"); return -EIO; } ret = wait(chip, STAT_XFE); if (ret) return -EIO; count_4 = (count & 0xff000000) >> 24; count_3 = (count & 0x00ff0000) >> 16; count_2 = (count & 0x0000ff00) >> 8; count_1 = (count & 0x000000ff); count_high = ((count + 6) & 0xffffff00) >> 8; count_low = ((count + 6) & 0x000000ff); /* Sending Header */ wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_DATA); wait_and_send(chip, count_high); wait_and_send(chip, count_low); /* Sending Data Header */ wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_VL_CHANNEL_TPM); wait_and_send(chip, count_4); wait_and_send(chip, count_3); wait_and_send(chip, count_2); wait_and_send(chip, count_1); /* Sending Data */ for (i = 0; i < count; i++) { wait_and_send(chip, buf[i]); } return count; } static void tpm_inf_cancel(struct tpm_chip *chip) { /* Since we are using the legacy mode to communicate with the TPM, we have no cancel functions, but have a workaround for interrupting the TPM through WTX. 
*/ } static u8 tpm_inf_status(struct tpm_chip *chip) { return tpm_data_in(STAT); } static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static struct attribute *inf_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_caps.attr, &dev_attr_cancel.attr, NULL, }; static struct attribute_group inf_attr_grp = {.attrs = inf_attrs }; static const struct file_operations inf_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = tpm_open, .read = tpm_read, .write = tpm_write, .release = tpm_release, }; static const struct tpm_vendor_specific tpm_inf = { .recv = tpm_inf_recv, .send = tpm_inf_send, .cancel = tpm_inf_cancel, .status = tpm_inf_status, .req_complete_mask = 0, .req_complete_val = 0, .attr_group = &inf_attr_grp, .miscdev = {.fops = &inf_ops,}, }; static const struct pnp_device_id tpm_pnp_tbl[] = { /* Infineon TPMs */ {"IFX0101", 0}, {"IFX0102", 0}, {"", 0} }; MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) { int rc = 0; u8 iol, ioh; int vendorid[2]; int version[2]; int productid[2]; char chipname[20]; struct tpm_chip *chip; /* read IO-ports through PnP */ if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { tpm_dev.iotype = TPM_INF_IO_PORT; tpm_dev.config_port = pnp_port_start(dev, 0); tpm_dev.config_size = pnp_port_len(dev, 0); tpm_dev.data_regs = pnp_port_start(dev, 1); tpm_dev.data_size = pnp_port_len(dev, 1); if ((tpm_dev.data_size < 4) || (tpm_dev.config_size < 2)) { rc = -EINVAL; goto err_last; } dev_info(&dev->dev, "Found %s with ID %s\n", dev->name, dev_id->id); if (!((tpm_dev.data_regs >> 8) & 0xff)) { rc = -EINVAL; goto err_last; } /* publish my base address and request region */ if 
(request_region(tpm_dev.data_regs, tpm_dev.data_size, "tpm_infineon0") == NULL) { rc = -EINVAL; goto err_last; } if (request_region(tpm_dev.config_port, tpm_dev.config_size, "tpm_infineon0") == NULL) { release_region(tpm_dev.data_regs, tpm_dev.data_size); rc = -EINVAL; goto err_last; } } else if (pnp_mem_valid(dev, 0) && !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { tpm_dev.iotype = TPM_INF_IO_MEM; tpm_dev.map_base = pnp_mem_start(dev, 0); tpm_dev.map_size = pnp_mem_len(dev, 0); dev_info(&dev->dev, "Found %s with ID %s\n", dev->name, dev_id->id); /* publish my base address and request region */ if (request_mem_region(tpm_dev.map_base, tpm_dev.map_size, "tpm_infineon0") == NULL) { rc = -EINVAL; goto err_last; } tpm_dev.mem_base = ioremap(tpm_dev.map_base, tpm_dev.map_size); if (tpm_dev.mem_base == NULL) { release_mem_region(tpm_dev.map_base, tpm_dev.map_size); rc = -EINVAL; goto err_last; } /* * The only known MMIO based Infineon TPM system provides * a single large mem region with the device config * registers at the default TPM_ADDR. The data registers * seem like they could be placed anywhere within the MMIO * region, but lets just put them at zero offset. */ tpm_dev.index_off = TPM_ADDR; tpm_dev.data_regs = 0x0; } else { rc = -EINVAL; goto err_last; } /* query chip for its vendor, its version number a.s.o. 
*/ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); tpm_config_out(IDVENL, TPM_INF_ADDR); vendorid[1] = tpm_config_in(TPM_INF_DATA); tpm_config_out(IDVENH, TPM_INF_ADDR); vendorid[0] = tpm_config_in(TPM_INF_DATA); tpm_config_out(IDPDL, TPM_INF_ADDR); productid[1] = tpm_config_in(TPM_INF_DATA); tpm_config_out(IDPDH, TPM_INF_ADDR); productid[0] = tpm_config_in(TPM_INF_DATA); tpm_config_out(CHIP_ID1, TPM_INF_ADDR); version[1] = tpm_config_in(TPM_INF_DATA); tpm_config_out(CHIP_ID2, TPM_INF_ADDR); version[0] = tpm_config_in(TPM_INF_DATA); switch ((productid[0] << 8) | productid[1]) { case 6: snprintf(chipname, sizeof(chipname), " (SLD 9630 TT 1.1)"); break; case 11: snprintf(chipname, sizeof(chipname), " (SLB 9635 TT 1.2)"); break; default: snprintf(chipname, sizeof(chipname), " (unknown chip)"); break; } if ((vendorid[0] << 8 | vendorid[1]) == (TPM_INFINEON_DEV_VEN_VALUE)) { /* configure TPM with IO-ports */ tpm_config_out(IOLIMH, TPM_INF_ADDR); tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); tpm_config_out(IOLIML, TPM_INF_ADDR); tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); /* control if IO-ports are set correctly */ tpm_config_out(IOLIMH, TPM_INF_ADDR); ioh = tpm_config_in(TPM_INF_DATA); tpm_config_out(IOLIML, TPM_INF_ADDR); iol = tpm_config_in(TPM_INF_DATA); if ((ioh << 8 | iol) != tpm_dev.data_regs) { dev_err(&dev->dev, "Could not set IO-data registers to 0x%x\n", tpm_dev.data_regs); rc = -EIO; goto err_release_region; } /* activate register */ tpm_config_out(TPM_DAR, TPM_INF_ADDR); tpm_config_out(0x01, TPM_INF_DATA); tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); /* disable RESET, LP and IRQC */ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); /* Finally, we're done, print some infos */ dev_info(&dev->dev, "TPM found: " "config base 0x%lx, " "data base 0x%lx, " "chip version 0x%02x%02x, " "vendor id 0x%x%x (Infineon), " "product id 0x%02x%02x" "%s\n", tpm_dev.iotype == TPM_INF_IO_PORT ? 
tpm_dev.config_port : tpm_dev.map_base + tpm_dev.index_off, tpm_dev.iotype == TPM_INF_IO_PORT ? tpm_dev.data_regs : tpm_dev.map_base + tpm_dev.data_regs, version[0], version[1], vendorid[0], vendorid[1], productid[0], productid[1], chipname); if (!(chip = tpm_register_hardware(&dev->dev, &tpm_inf))) goto err_release_region; return 0; } else { rc = -ENODEV; goto err_release_region; } err_release_region: if (tpm_dev.iotype == TPM_INF_IO_PORT) { release_region(tpm_dev.data_regs, tpm_dev.data_size); release_region(tpm_dev.config_port, tpm_dev.config_size); } else { iounmap(tpm_dev.mem_base); release_mem_region(tpm_dev.map_base, tpm_dev.map_size); } err_last: return rc; } static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) { struct tpm_chip *chip = pnp_get_drvdata(dev); if (chip) { if (tpm_dev.iotype == TPM_INF_IO_PORT) { release_region(tpm_dev.data_regs, tpm_dev.data_size); release_region(tpm_dev.config_port, tpm_dev.config_size); } else { iounmap(tpm_dev.mem_base); release_mem_region(tpm_dev.map_base, tpm_dev.map_size); } tpm_remove_hardware(chip->dev); } } static struct pnp_driver tpm_inf_pnp_driver = { .name = "tpm_inf_pnp", .driver = { .owner = THIS_MODULE, .suspend = tpm_pm_suspend, .resume = tpm_pm_resume, }, .id_table = tpm_pnp_tbl, .probe = tpm_inf_pnp_probe, .remove = __devexit_p(tpm_inf_pnp_remove), }; static int __init init_inf(void) { return pnp_register_driver(&tpm_inf_pnp_driver); } static void __exit cleanup_inf(void) { pnp_unregister_driver(&tpm_inf_pnp_driver); } module_init(init_inf); module_exit(cleanup_inf); MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); MODULE_VERSION("1.9"); MODULE_LICENSE("GPL");
gpl-2.0
andrey-utkin/linux
drivers/input/touchscreen/tsc2004.c
495
2261
/* * TSC2004 touchscreen driver * * Copyright (C) 2015 QWERTY Embedded Design * Copyright (C) 2015 EMAC Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/input.h> #include <linux/of.h> #include <linux/i2c.h> #include <linux/regmap.h> #include "tsc200x-core.h" static const struct input_id tsc2004_input_id = { .bustype = BUS_I2C, .product = 2004, }; static int tsc2004_cmd(struct device *dev, u8 cmd) { u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd; s32 data; struct i2c_client *i2c = to_i2c_client(dev); data = i2c_smbus_write_byte(i2c, tx); if (data < 0) { dev_err(dev, "%s: failed, command: %x i2c error: %d\n", __func__, cmd, data); return data; } return 0; } static int tsc2004_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id, devm_regmap_init_i2c(i2c, &tsc200x_regmap_config), tsc2004_cmd); } static int tsc2004_remove(struct i2c_client *i2c) { return tsc200x_remove(&i2c->dev); } static const struct i2c_device_id tsc2004_idtable[] = { { "tsc2004", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tsc2004_idtable); #ifdef CONFIG_OF static const struct of_device_id tsc2004_of_match[] = { { .compatible = "ti,tsc2004" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, tsc2004_of_match); #endif static struct i2c_driver tsc2004_driver = { .driver = { .name = "tsc2004", .of_match_table = of_match_ptr(tsc2004_of_match), .pm = &tsc200x_pm_ops, }, .id_table = tsc2004_idtable, .probe = tsc2004_probe, .remove = tsc2004_remove, }; 
module_i2c_driver(tsc2004_driver); MODULE_AUTHOR("Michael Welling <mwelling@ieee.org>"); MODULE_DESCRIPTION("TSC2004 Touchscreen Driver"); MODULE_LICENSE("GPL");
gpl-2.0
patholden/linux
drivers/mfd/cros_ec_spi.c
751
11350
/*
 * ChromeOS EC multi-function device (SPI)
 *
 * Copyright (C) 2012 Google, Inc
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/* The header byte, which follows the preamble */
#define EC_MSG_HEADER			0xec

/*
 * Number of EC preamble bytes we read at a time. Since it takes
 * about 400-500us for the EC to respond there is not a lot of
 * point in tuning this. If the EC could respond faster then
 * we could increase this so that might expect the preamble and
 * message to occur in a single transaction. However, the maximum
 * SPI transfer size is 256 bytes, so at 5MHz we need a response
 * time of perhaps <320us (200 bytes / 1600 bits).
 */
#define EC_MSG_PREAMBLE_COUNT		32

/*
 * Allow for a long time for the EC to respond. We support i2c
 * tunneling and support fairly long messages for the tunnel (249
 * bytes long at the moment). If we're talking to a 100 kHz device
 * on the other end and need to transfer ~256 bytes, then we need:
 *  10 us/bit * ~10 bits/byte * ~256 bytes = ~25ms
 *
 * We'll wait 4 times that to handle clock stretching and other
 * paranoia.
 *
 * It's pretty unlikely that we'll really see a 249 byte tunnel in
 * anything other than testing. If this was more common we might
 * consider having slow commands like this require a GET_STATUS
 * wait loop. The 'flash write' command would be another candidate
 * for this, clocking in at 2-3ms.
 */
#define EC_MSG_DEADLINE_MS		100

/*
 * Time between raising the SPI chip select (for the end of a
 * transaction) and dropping it again (for the next transaction).
 * If we go too fast, the EC will miss the transaction. We know that we
 * need at least 70 us with the 16 MHz STM32 EC, so go with 200 us to be
 * safe.
 */
#define EC_SPI_RECOVERY_TIME_NS	(200 * 1000)

/*
 * The EC is unresponsive for a time after a reboot command. Add a
 * simple delay to make sure that the bus stays locked.
 */
#define EC_REBOOT_DELAY_MS		50

/**
 * struct cros_ec_spi - information about a SPI-connected EC
 *
 * @spi: SPI device we are connected to
 * @last_transfer_ns: time that we last finished a transfer, or 0 if there
 *	is no record
 * @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
 *      is sent when we want to turn off CS at the end of a transaction.
 */
struct cros_ec_spi {
	struct spi_device *spi;
	s64 last_transfer_ns;
	unsigned int end_of_msg_delay;
};

/*
 * debug_packet - hex-dump a buffer via dev_dbg/pr_cont.
 * Compiled out entirely unless DEBUG is defined for this file.
 */
static void debug_packet(struct device *dev, const char *name, u8 *ptr,
			 int len)
{
#ifdef DEBUG
	int i;

	dev_dbg(dev, "%s: ", name);
	for (i = 0; i < len; i++)
		pr_cont(" %02x", ptr[i]);

	pr_cont("\n");
#endif
}

/**
 * cros_ec_spi_receive_response - Receive a response from the EC.
 *
 * This function has two phases: reading the preamble bytes (since if we read
 * data from the EC before it is ready to send, we just get preamble) and
 * reading the actual message.
 *
 * The received data is placed into ec_dev->din.
 *
 * @ec_dev: ChromeOS EC device
 * @need_len: Number of message bytes we need to read
 *
 * Returns 0 on success, negative errno on SPI failure or timeout.
 */
static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
					int need_len)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_transfer trans;
	struct spi_message msg;
	u8 *ptr, *end;
	int ret;
	unsigned long deadline;
	int todo;

	/* Receive data until we see the header byte */
	deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
	while (true) {
		unsigned long start_jiffies = jiffies;

		/*
		 * Read one chunk of (possibly) preamble into the start of
		 * din.  cs_change keeps CS asserted between transfers so
		 * the EC treats this as one transaction.
		 */
		memset(&trans, 0, sizeof(trans));
		trans.cs_change = 1;
		trans.rx_buf = ptr = ec_dev->din;
		trans.len = EC_MSG_PREAMBLE_COUNT;

		spi_message_init(&msg);
		spi_message_add_tail(&trans, &msg);
		ret = spi_sync(ec_spi->spi, &msg);
		if (ret < 0) {
			dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
			return ret;
		}

		/* Scan the chunk for the header byte that ends the preamble */
		for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
			if (*ptr == EC_MSG_HEADER) {
				dev_dbg(ec_dev->dev, "msg found at %zd\n",
					ptr - ec_dev->din);
				break;
			}
		}

		if (ptr != end)
			break;

		/*
		 * Use the time at the start of the loop as a timeout.  This
		 * gives us one last shot at getting the transfer and is useful
		 * in case we got context switched out for a while.
		 */
		if (time_after(start_jiffies, deadline)) {
			dev_warn(ec_dev->dev, "EC failed to respond in time\n");
			return -ETIMEDOUT;
		}
	}

	/*
	 * ptr now points to the header byte. Copy any valid data to the
	 * start of our buffer
	 */
	todo = end - ++ptr;
	BUG_ON(todo < 0 || todo > ec_dev->din_size);
	todo = min(todo, need_len);
	memmove(ec_dev->din, ptr, todo);
	ptr = ec_dev->din + todo;
	dev_dbg(ec_dev->dev, "need %d, got %d bytes from preamble\n",
		 need_len, todo);
	need_len -= todo;

	/* Receive data until we have it all */
	while (need_len > 0) {
		/*
		 * We can't support transfers larger than the SPI FIFO size
		 * unless we have DMA. We don't have DMA on the ISP SPI ports
		 * for Exynos. We need a way of asking SPI driver for
		 * maximum-supported transfer size.
		 */
		todo = min(need_len, 256);
		dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
			todo, need_len, ptr - ec_dev->din);

		memset(&trans, 0, sizeof(trans));
		trans.cs_change = 1;
		trans.rx_buf = ptr;
		trans.len = todo;
		spi_message_init(&msg);
		spi_message_add_tail(&trans, &msg);

		/* send command to EC and read answer */
		BUG_ON((u8 *)trans.rx_buf - ec_dev->din + todo >
		       ec_dev->din_size);
		ret = spi_sync(ec_spi->spi, &msg);
		if (ret < 0) {
			dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
			return ret;
		}

		debug_packet(ec_dev->dev, "interim", ptr, todo);
		ptr += todo;
		need_len -= todo;
	}

	dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);

	return 0;
}

/**
 * cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
 *
 * @ec_dev: ChromeOS EC device
 * @ec_msg: Message to transfer
 *
 * Returns the payload length on success, negative errno on failure
 * (-ENOSPC if the reply exceeds @ec_msg->insize, -EBADMSG on checksum
 * mismatch, or the underlying SPI error).
 */
static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
				struct cros_ec_command *ec_msg)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_transfer trans;
	struct spi_message msg;
	int i, len;
	u8 *ptr;
	int sum;
	int ret = 0, final_ret;

	len = cros_ec_prepare_tx(ec_dev, ec_msg);
	dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);

	/* If it's too soon to do another transaction, wait */
	if (ec_spi->last_transfer_ns) {
		unsigned long delay;	/* The delay completed so far */

		delay = ktime_get_ns() - ec_spi->last_transfer_ns;
		if (delay < EC_SPI_RECOVERY_TIME_NS)
			ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
	}

	/* Transmit phase - send our message */
	debug_packet(ec_dev->dev, "out", ec_dev->dout, len);
	memset(&trans, 0, sizeof(trans));
	trans.tx_buf = ec_dev->dout;
	trans.len = len;
	trans.cs_change = 1;
	spi_message_init(&msg);
	spi_message_add_tail(&trans, &msg);
	ret = spi_sync(ec_spi->spi, &msg);

	/* Get the response */
	if (!ret) {
		/*
		 * EC_MSG_TX_PROTO_BYTES covers the response framing
		 * (result/length/checksum) on top of the payload —
		 * presumably defined in cros_ec_commands.h; verify there.
		 */
		ret = cros_ec_spi_receive_response(ec_dev,
				ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
	} else {
		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
	}

	/*
	 * Turn off CS, possibly adding a delay to ensure the rising edge
	 * doesn't come too soon after the end of the data.
	 */
	spi_message_init(&msg);
	memset(&trans, 0, sizeof(trans));
	trans.delay_usecs = ec_spi->end_of_msg_delay;
	spi_message_add_tail(&trans, &msg);

	final_ret = spi_sync(ec_spi->spi, &msg);
	/* Timestamp even on failure: the recovery gap still applies */
	ec_spi->last_transfer_ns = ktime_get_ns();
	if (!ret)
		ret = final_ret;
	if (ret < 0) {
		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
		goto exit;
	}

	ptr = ec_dev->din;

	/* check response error code */
	ec_msg->result = ptr[0];
	ret = cros_ec_check_result(ec_dev, ec_msg);
	if (ret)
		goto exit;

	/* Reply layout: [0]=result, [1]=length, [2..]=payload, then checksum */
	len = ptr[1];
	sum = ptr[0] + ptr[1];
	if (len > ec_msg->insize) {
		dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
			len, ec_msg->insize);
		ret = -ENOSPC;
		goto exit;
	}

	/* copy response packet payload and compute checksum */
	for (i = 0; i < len; i++) {
		sum += ptr[i + 2];
		if (ec_msg->insize)
			ec_msg->indata[i] = ptr[i + 2];
	}
	sum &= 0xff;

	debug_packet(ec_dev->dev, "in", ptr, len + 3);
	if (sum != ptr[len + 2]) {
		dev_err(ec_dev->dev,
			"bad packet checksum, expected %02x, got %02x\n",
			sum, ptr[len + 2]);
		ret = -EBADMSG;
		goto exit;
	}

	ret = len;
exit:
	/* EC goes quiet after a reboot; hold the bus until it recovers */
	if (ec_msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	return ret;
}

/*
 * cros_ec_spi_dt_probe - read optional device-tree tuning properties.
 * Currently only "google,cros-ec-spi-msg-delay" (end-of-message CS delay
 * in microseconds); left at 0 when the property is absent.
 */
static void cros_ec_spi_dt_probe(struct cros_ec_spi *ec_spi, struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 val;
	int ret;

	ret = of_property_read_u32(np, "google,cros-ec-spi-msg-delay", &val);
	if (!ret)
		ec_spi->end_of_msg_delay = val;
}

/*
 * cros_ec_spi_probe - set up the SPI link and register the EC device.
 * All allocations are devm-managed, so the error paths need no cleanup.
 */
static int cros_ec_spi_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct cros_ec_device *ec_dev;
	struct cros_ec_spi *ec_spi;
	int err;

	/* The EC protocol uses 8-bit words in SPI mode 0 */
	spi->bits_per_word = 8;
	spi->mode = SPI_MODE_0;
	err = spi_setup(spi);
	if (err < 0)
		return err;

	ec_spi = devm_kzalloc(dev, sizeof(*ec_spi), GFP_KERNEL);
	if (ec_spi == NULL)
		return -ENOMEM;
	ec_spi->spi = spi;
	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	/* Check for any DT properties */
	cros_ec_spi_dt_probe(ec_spi, dev);

	spi_set_drvdata(spi, ec_dev);
	ec_dev->dev = dev;
	ec_dev->priv = ec_spi;
	ec_dev->irq = spi->irq;
	ec_dev->cmd_xfer = cros_ec_cmd_xfer_spi;
	ec_dev->ec_name = ec_spi->spi->modalias;
	ec_dev->phys_name = dev_name(&ec_spi->spi->dev);
	ec_dev->parent = &ec_spi->spi->dev;
	/* din must also hold the preamble read ahead of the message */
	ec_dev->din_size = EC_MSG_BYTES + EC_MSG_PREAMBLE_COUNT;
	ec_dev->dout_size = EC_MSG_BYTES;

	err = cros_ec_register(ec_dev);
	if (err) {
		dev_err(dev, "cannot register EC\n");
		return err;
	}

	device_init_wakeup(&spi->dev, true);

	return 0;
}

/* Unregister the EC core state; devm frees the rest. */
static int cros_ec_spi_remove(struct spi_device *spi)
{
	struct cros_ec_device *ec_dev;

	ec_dev = spi_get_drvdata(spi);
	cros_ec_remove(ec_dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* Delegate system suspend/resume to the cros_ec core. */
static int cros_ec_spi_suspend(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_suspend(ec_dev);
}

static int cros_ec_spi_resume(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_resume(ec_dev);
}
#endif

static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
			 cros_ec_spi_resume);

static const struct spi_device_id cros_ec_spi_id[] = {
	{ "cros-ec-spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, cros_ec_spi_id);

static struct spi_driver cros_ec_driver_spi = {
	.driver	= {
		.name	= "cros-ec-spi",
		.owner	= THIS_MODULE,
		.pm	= &cros_ec_spi_pm_ops,
	},
	.probe		= cros_ec_spi_probe,
	.remove		= cros_ec_spi_remove,
	.id_table	= cros_ec_spi_id,
};

module_spi_driver(cros_ec_driver_spi);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC multi function device (SPI)");
gpl-2.0
Haderach/linux-mirror
arch/x86/mm/pageattr-test.c
751
5440
/*
 * self test for change_page_attr.
 *
 * Clears the a test pte bit on random pages in the direct mapping,
 * then reverts and compares page tables forwards and afterwards.
 */
#include <linux/bootmem.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>

/*
 * Only print the results of the first pass:
 */
static __read_mostly int print = 1;

enum {
	/* Number of random page ranges exercised per pass */
	NTEST			= 400,
#ifdef CONFIG_X86_64
	/* Large-page size (2M/4M depending on config) */
	LPS			= (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
	LPS			= (1 << PMD_SHIFT),
#else
	LPS			= (1 << 22),
#endif
	/* Giant (1G) page size */
	GPS			= (1<<30)
};

/* _PAGE_CPA_TEST: software PTE bit reserved for this self-test */
#define PAGE_CPA_TEST	__pgprot(_PAGE_CPA_TEST)

/* Returns non-zero when the test bit (software bit 1) is set in @pte. */
static int pte_testbit(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFTW1;
}

/*
 * Tally of the direct mapping's page-table state:
 * counts per mapping size, executable-page count and executable range.
 */
struct split_state {
	long lpg, gpg, spg, exec;
	long min_exec, max_exec;
};

/*
 * Walk the whole direct mapping, counting 4k/2M/1G mappings and
 * executable pages, and cross-check the totals against max_pfn_mapped.
 * Returns non-zero if an inconsistency was found.
 */
static int print_split(struct split_state *s)
{
	long i, expected, missed = 0;
	int err = 0;

	s->lpg = s->gpg = s->spg = s->exec = 0;
	s->min_exec = ~0UL;
	s->max_exec = 0;
	for (i = 0; i < max_pfn_mapped; ) {
		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
		unsigned int level;
		pte_t *pte;

		pte = lookup_address(addr, &level);
		if (!pte) {
			missed++;
			i++;
			continue;
		}

		if (level == PG_LEVEL_1G && sizeof(long) == 8) {
			s->gpg++;
			/* skip the rest of this 1G mapping */
			i += GPS/PAGE_SIZE;
		} else if (level == PG_LEVEL_2M) {
			/* a present 2M-level entry must carry PSE */
			if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
				printk(KERN_ERR
					"%lx level %d but not PSE %Lx\n",
					addr, level, (u64)pte_val(*pte));
				err = 1;
			}
			s->lpg++;
			i += LPS/PAGE_SIZE;
		} else {
			s->spg++;
			i++;
		}
		/* NX clear => page is executable */
		if (!(pte_val(*pte) & _PAGE_NX)) {
			s->exec++;
			if (addr < s->min_exec)
				s->min_exec = addr;
			if (addr > s->max_exec)
				s->max_exec = addr;
		}
	}
	if (print) {
		printk(KERN_INFO
			" 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
			s->spg, s->lpg, s->gpg, s->exec,
			s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec,
			missed);
	}

	/* the per-size counts must add back up to the pfns walked */
	expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
	if (expected != i) {
		printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n",
			max_pfn_mapped, expected);
		return 1;
	}

	return err;
}

/* Per-test record of the chosen range (addr[i]==0 marks a skipped test) */
static unsigned long addr[NTEST];
static unsigned int len[NTEST];

/* Change the global bit on random pages in the direct mapping */
static int pageattr_test(void)
{
	struct split_state sa, sb, sc;
	unsigned long *bm;
	pte_t *pte, pte0;
	int failed = 0;
	unsigned int level;
	int i, k;
	int err;
	unsigned long test_addr;

	if (print)
		printk(KERN_INFO "CPA self-test:\n");

	/* one bit per pfn, to keep the random ranges disjoint */
	bm = vzalloc((max_pfn_mapped + 7) / 8);
	if (!bm) {
		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
		return -ENOMEM;
	}

	failed += print_split(&sa);

	for (i = 0; i < NTEST; i++) {
		unsigned long pfn = prandom_u32() % max_pfn_mapped;

		addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
		len[i] = prandom_u32() % 100;
		len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);

		if (len[i] == 0)
			len[i] = 1;

		pte = NULL;
		pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */

		/*
		 * Trim the range so that every page is present, shares the
		 * protections of the first page, and was not claimed by an
		 * earlier test.
		 */
		for (k = 0; k < len[i]; k++) {
			pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
			if (!pte || pgprot_val(pte_pgprot(*pte)) == 0 ||
			    !(pte_val(*pte) & _PAGE_PRESENT)) {
				addr[i] = 0;
				break;
			}
			if (k == 0) {
				pte0 = *pte;
			} else {
				if (pgprot_val(pte_pgprot(*pte)) !=
				    pgprot_val(pte_pgprot(pte0))) {
					len[i] = k;
					break;
				}
			}
			if (test_bit(pfn + k, bm)) {
				len[i] = k;
				break;
			}
			__set_bit(pfn + k, bm);
		}
		if (!addr[i] || !pte || !k) {
			addr[i] = 0;
			continue;
		}

		/* set the test bit on the range and verify the result */
		test_addr = addr[i];
		err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
		if (err < 0) {
			printk(KERN_ERR "CPA %d failed %d\n", i, err);
			failed++;
		}

		pte = lookup_address(addr[i], &level);
		if (!pte || !pte_testbit(*pte) || pte_huge(*pte)) {
			printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i],
				pte ? (u64)pte_val(*pte) : 0ULL);
			failed++;
		}
		/* the set must have split any huge mapping down to 4k */
		if (level != PG_LEVEL_4K) {
			printk(KERN_ERR "CPA %lx: unexpected level %d\n",
				addr[i], level);
			failed++;
		}

	}
	vfree(bm);

	failed += print_split(&sb);

	/* second pass: revert every range and verify the bit is gone */
	for (i = 0; i < NTEST; i++) {
		if (!addr[i])
			continue;
		pte = lookup_address(addr[i], &level);
		if (!pte) {
			printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]);
			failed++;
			continue;
		}
		test_addr = addr[i];
		err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
		if (err < 0) {
			printk(KERN_ERR "CPA reverting failed: %d\n", err);
			failed++;
		}
		pte = lookup_address(addr[i], &level);
		if (!pte || pte_testbit(*pte)) {
			printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n",
				addr[i], pte ? (u64)pte_val(*pte) : 0ULL);
			failed++;
		}

	}

	failed += print_split(&sc);

	if (failed) {
		WARN(1, KERN_ERR "NOT PASSED. Please report.\n");
		return -EINVAL;
	} else {
		if (print)
			printk(KERN_INFO "ok.\n");
	}

	return 0;
}

/*
 * Kthread body: re-run the self-test every 30 seconds until the
 * thread is stopped or a run returns an error; only the first pass
 * prints its results ('print' is decremented after it).
 */
static int do_pageattr_test(void *__unused)
{
	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(HZ*30);
		if (pageattr_test() < 0)
			break;
		if (print)
			print--;
	}
	return 0;
}

/* Initcall: spawn the test kthread; boot continues regardless. */
static int start_pageattr_test(void)
{
	struct task_struct *p;

	p = kthread_create(do_pageattr_test, NULL, "pageattr-test");
	if (!IS_ERR(p))
		wake_up_process(p);
	else
		WARN_ON(1);

	return 0;
}
device_initcall(start_pageattr_test);
gpl-2.0
tapash/linux
arch/x86/mm/pageattr-test.c
751
5440
/* * self test for change_page_attr. * * Clears the a test pte bit on random pages in the direct mapping, * then reverts and compares page tables forwards and afterwards. */ #include <linux/bootmem.h> #include <linux/kthread.h> #include <linux/random.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include <asm/kdebug.h> /* * Only print the results of the first pass: */ static __read_mostly int print = 1; enum { NTEST = 400, #ifdef CONFIG_X86_64 LPS = (1 << PMD_SHIFT), #elif defined(CONFIG_X86_PAE) LPS = (1 << PMD_SHIFT), #else LPS = (1 << 22), #endif GPS = (1<<30) }; #define PAGE_CPA_TEST __pgprot(_PAGE_CPA_TEST) static int pte_testbit(pte_t pte) { return pte_flags(pte) & _PAGE_SOFTW1; } struct split_state { long lpg, gpg, spg, exec; long min_exec, max_exec; }; static int print_split(struct split_state *s) { long i, expected, missed = 0; int err = 0; s->lpg = s->gpg = s->spg = s->exec = 0; s->min_exec = ~0UL; s->max_exec = 0; for (i = 0; i < max_pfn_mapped; ) { unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT); unsigned int level; pte_t *pte; pte = lookup_address(addr, &level); if (!pte) { missed++; i++; continue; } if (level == PG_LEVEL_1G && sizeof(long) == 8) { s->gpg++; i += GPS/PAGE_SIZE; } else if (level == PG_LEVEL_2M) { if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) { printk(KERN_ERR "%lx level %d but not PSE %Lx\n", addr, level, (u64)pte_val(*pte)); err = 1; } s->lpg++; i += LPS/PAGE_SIZE; } else { s->spg++; i++; } if (!(pte_val(*pte) & _PAGE_NX)) { s->exec++; if (addr < s->min_exec) s->min_exec = addr; if (addr > s->max_exec) s->max_exec = addr; } } if (print) { printk(KERN_INFO " 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n", s->spg, s->lpg, s->gpg, s->exec, s->min_exec != ~0UL ? 
s->min_exec : 0, s->max_exec, missed); } expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed; if (expected != i) { printk(KERN_ERR "CPA max_pfn_mapped %lu but expected %lu\n", max_pfn_mapped, expected); return 1; } return err; } static unsigned long addr[NTEST]; static unsigned int len[NTEST]; /* Change the global bit on random pages in the direct mapping */ static int pageattr_test(void) { struct split_state sa, sb, sc; unsigned long *bm; pte_t *pte, pte0; int failed = 0; unsigned int level; int i, k; int err; unsigned long test_addr; if (print) printk(KERN_INFO "CPA self-test:\n"); bm = vzalloc((max_pfn_mapped + 7) / 8); if (!bm) { printk(KERN_ERR "CPA Cannot vmalloc bitmap\n"); return -ENOMEM; } failed += print_split(&sa); for (i = 0; i < NTEST; i++) { unsigned long pfn = prandom_u32() % max_pfn_mapped; addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT); len[i] = prandom_u32() % 100; len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1); if (len[i] == 0) len[i] = 1; pte = NULL; pte0 = pfn_pte(0, __pgprot(0)); /* shut gcc up */ for (k = 0; k < len[i]; k++) { pte = lookup_address(addr[i] + k*PAGE_SIZE, &level); if (!pte || pgprot_val(pte_pgprot(*pte)) == 0 || !(pte_val(*pte) & _PAGE_PRESENT)) { addr[i] = 0; break; } if (k == 0) { pte0 = *pte; } else { if (pgprot_val(pte_pgprot(*pte)) != pgprot_val(pte_pgprot(pte0))) { len[i] = k; break; } } if (test_bit(pfn + k, bm)) { len[i] = k; break; } __set_bit(pfn + k, bm); } if (!addr[i] || !pte || !k) { addr[i] = 0; continue; } test_addr = addr[i]; err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0); if (err < 0) { printk(KERN_ERR "CPA %d failed %d\n", i, err); failed++; } pte = lookup_address(addr[i], &level); if (!pte || !pte_testbit(*pte) || pte_huge(*pte)) { printk(KERN_ERR "CPA %lx: bad pte %Lx\n", addr[i], pte ? 
(u64)pte_val(*pte) : 0ULL); failed++; } if (level != PG_LEVEL_4K) { printk(KERN_ERR "CPA %lx: unexpected level %d\n", addr[i], level); failed++; } } vfree(bm); failed += print_split(&sb); for (i = 0; i < NTEST; i++) { if (!addr[i]) continue; pte = lookup_address(addr[i], &level); if (!pte) { printk(KERN_ERR "CPA lookup of %lx failed\n", addr[i]); failed++; continue; } test_addr = addr[i]; err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0); if (err < 0) { printk(KERN_ERR "CPA reverting failed: %d\n", err); failed++; } pte = lookup_address(addr[i], &level); if (!pte || pte_testbit(*pte)) { printk(KERN_ERR "CPA %lx: bad pte after revert %Lx\n", addr[i], pte ? (u64)pte_val(*pte) : 0ULL); failed++; } } failed += print_split(&sc); if (failed) { WARN(1, KERN_ERR "NOT PASSED. Please report.\n"); return -EINVAL; } else { if (print) printk(KERN_INFO "ok.\n"); } return 0; } static int do_pageattr_test(void *__unused) { while (!kthread_should_stop()) { schedule_timeout_interruptible(HZ*30); if (pageattr_test() < 0) break; if (print) print--; } return 0; } static int start_pageattr_test(void) { struct task_struct *p; p = kthread_create(do_pageattr_test, NULL, "pageattr-test"); if (!IS_ERR(p)) wake_up_process(p); else WARN_ON(1); return 0; } device_initcall(start_pageattr_test);
gpl-2.0
vocoderism/kernel_falcon_umts_kk
drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
1263
15015
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "reg.h" #include "def.h" #include "phy.h" #include "rf.h" #include "dm.h" static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw); void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); switch (bandwidth) { case HT_CHANNEL_WIDTH_20: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff) | 0x0400); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; case HT_CHANNEL_WIDTH_20_40: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff)); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "unknown bandwidth: %#X\n", bandwidth); break; } } void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 tx_agc[2] = { 0, 0 }, tmpval = 0; bool turbo_scanoff = false; u8 idx1, idx2; u8 *ptr; if (rtlhal->interface == INTF_PCI) { if (rtlefuse->eeprom_regulatory != 0) turbo_scanoff = true; } else { if ((rtlefuse->eeprom_regulatory != 0) || (rtlefuse->external_pa)) turbo_scanoff = true; } if (mac->act_scanning) { tx_agc[RF90_PATH_A] = 0x3f3f3f3f; tx_agc[RF90_PATH_B] = 0x3f3f3f3f; if (turbo_scanoff) { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); if (rtlhal->interface == INTF_USB) { if (tx_agc[idx1] > 0x20 && rtlefuse->external_pa) tx_agc[idx1] = 0x20; } } } } else { if 
(rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL1) { tx_agc[RF90_PATH_A] = 0x10101010; tx_agc[RF90_PATH_B] = 0x10101010; } else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL2) { tx_agc[RF90_PATH_A] = 0x00000000; tx_agc[RF90_PATH_B] = 0x00000000; } else{ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); } if (rtlefuse->eeprom_regulatory == 0) { tmpval = (rtlphy->mcs_txpwrlevel_origoffset [0][6]) + (rtlphy->mcs_txpwrlevel_origoffset [0][7] << 8); tx_agc[RF90_PATH_A] += tmpval; tmpval = (rtlphy->mcs_txpwrlevel_origoffset [0][14]) + (rtlphy->mcs_txpwrlevel_origoffset [0][15] << 24); tx_agc[RF90_PATH_B] += tmpval; } } } for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { ptr = (u8 *) (&(tx_agc[idx1])); for (idx2 = 0; idx2 < 4; idx2++) { if (*ptr > RF6052_MAX_TX_PWR) *ptr = RF6052_MAX_TX_PWR; ptr++; } } tmpval = tx_agc[RF90_PATH_A] & 0xff; rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_A_CCK1_MCS32); tmpval = tx_agc[RF90_PATH_A] >> 8; if (mac->mode == WIRELESS_MODE_B) tmpval = tmpval & 0xff00ffff; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] >> 24; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK1_55_MCS32); } static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel, u32 *ofdmbase, u32 *mcsbase) { 
struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 powerBase0, powerBase1; u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = ppowerlevel[i]; legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; powerBase0 = powerlevel[i] + legacy_pwrdiff; powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0; *(ofdmbase + i) = powerBase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [OFDM power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(ofdmbase + i)); } for (i = 0; i < 2; i++) { if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; powerlevel[i] += ht20_pwrdiff; } powerBase1 = powerlevel[i]; powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; *(mcsbase + i) = powerBase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(mcsbase + i)); } } static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, u32 *powerBase0, u32 *powerBase1, u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup = 0, pwr_diff_limit[4]; u32 writeVal, customer_limit, rf; for (rf = 0; rf < 2; rf++) { switch (rtlefuse->eeprom_regulatory) { case 0: chnlgroup = 0; writeVal = rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance,writeVal(%c) = 0x%x\n", rf == 0 ? 
'A' : 'B', writeVal); break; case 1: if (rtlphy->pwrgroup_cnt == 1) chnlgroup = 0; if (rtlphy->pwrgroup_cnt >= 3) { if (channel <= 3) chnlgroup = 0; else if (channel >= 4 && channel <= 9) chnlgroup = 1; else if (channel > 9) chnlgroup = 2; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) chnlgroup++; else chnlgroup += 4; } writeVal = rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; case 2: writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory,writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; case 3: chnlgroup = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 40MHzrf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', rtlefuse->pwrgroup_ht40[rf] [channel - 1]); } else { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 20MHz rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', rtlefuse->pwrgroup_ht20[rf] [channel - 1]); } for (i = 0; i < 4; i++) { pwr_diff_limit[i] = (u8) ((rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] & (0x7f << (i * 8))) >> (i * 8)); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (pwr_diff_limit[i] > rtlefuse->pwrgroup_ht40[rf] [channel - 1]) pwr_diff_limit[i] = rtlefuse-> pwrgroup_ht40[rf] [channel - 1]; } else { if (pwr_diff_limit[i] > rtlefuse->pwrgroup_ht20[rf] [channel - 1]) pwr_diff_limit[i] = rtlefuse->pwrgroup_ht20[rf] [channel - 1]; } } customer_limit = (pwr_diff_limit[3] << 24) | (pwr_diff_limit[2] << 16) | (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', customer_limit); writeVal = customer_limit + ((index < 2) ? 
powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer, writeVal rf(%c)= 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; default: chnlgroup = 0; writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance, writeValrf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; } if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL1) writeVal = 0x14141414; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL2) writeVal = 0x00000000; if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) writeVal = writeVal - 0x06060606; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT2) writeVal = writeVal; *(p_outwriteval + rf) = writeVal; } } static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, u8 index, u32 *pValue) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u16 regoffset_a[6] = { RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 }; u16 regoffset_b[6] = { RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 }; u8 i, rf, pwr_val[4]; u32 writeVal; u16 regoffset; for (rf = 0; rf < 2; rf++) { writeVal = pValue[rf]; for (i = 0; i < 4; i++) { pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; else regoffset = regoffset_b[index]; rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Set 0x%x = %08x\n", regoffset, writeVal); if (((get_rf_type(rtlphy) == RF_2T2R) && (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_B_MCS15_MCS12)) || 
((get_rf_type(rtlphy) != RF_2T2R) && (regoffset == RTXAGC_A_MCS07_MCS04 || regoffset == RTXAGC_B_MCS07_MCS04))) { writeVal = pwr_val[3]; if (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_A_MCS07_MCS04) regoffset = 0xc90; if (regoffset == RTXAGC_B_MCS15_MCS12 || regoffset == RTXAGC_B_MCS07_MCS04) regoffset = 0xc98; for (i = 0; i < 3; i++) { writeVal = (writeVal > 6) ? (writeVal - 6) : 0; rtl_write_byte(rtlpriv, (u32)(regoffset + i), (u8)writeVal); } } } } void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel) { u32 writeVal[2], powerBase0[2], powerBase1[2]; u8 index = 0; rtl92c_phy_get_power_base(hw, ppowerlevel, channel, &powerBase0[0], &powerBase1[0]); for (index = 0; index < 6; index++) { _rtl92c_get_txpower_writeval_by_regulatory(hw, channel, index, &powerBase0[0], &powerBase1[0], &writeVal[0]); _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]); } } bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); bool rtstatus = true; u8 b_reg_hwparafile = 1; if (rtlphy->rf_type == RF_1T1R) rtlphy->num_total_rfpath = 1; else rtlphy->num_total_rfpath = 2; if (b_reg_hwparafile == 1) rtstatus = _rtl92c_phy_rf6052_config_parafile(hw); return rtstatus; } static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u32 u4_regvalue = 0; u8 rfpath; bool rtstatus = true; struct bb_reg_def *pphyreg; for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { pphyreg = &rtlphy->phyreg_def[rfpath]; switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV); break; case RF90_PATH_B: case RF90_PATH_D: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); break; } rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); udelay(1); rtl_set_bbreg(hw, pphyreg->rfintfo, 
BRFSI_RFENV, 0x1); udelay(1); rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDREAALENGTH, 0x0); udelay(1); rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); udelay(1); switch (rfpath) { case RF90_PATH_A: rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw, (enum radio_path) rfpath); break; case RF90_PATH_B: rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw, (enum radio_path) rfpath); break; case RF90_PATH_C: break; case RF90_PATH_D: break; } switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, u4_regvalue); break; case RF90_PATH_B: case RF90_PATH_D: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, u4_regvalue); break; } if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio[%d] Fail!!", rfpath); goto phy_rf_cfg_fail; } } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n"); return rtstatus; phy_rf_cfg_fail: return rtstatus; }
gpl-2.0
RenderBroken/render_kernel_motorola_msm8226
arch/arm/mach-msm/bms-batterydata.c
1775
3402
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/batterydata-lib.h> static struct single_row_lut fcc_temp = { .x = {-20, 0, 25, 40, 65}, .y = {1492, 1492, 1493, 1483, 1502}, .cols = 5 }; static struct pc_temp_ocv_lut pc_temp_ocv = { .rows = 29, .cols = 5, .temp = {-20, 0, 25, 40, 65}, .percent = {100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, .ocv = { {4173, 4167, 4163, 4156, 4154}, {4104, 4107, 4108, 4102, 4104}, {4057, 4072, 4069, 4061, 4060}, {3973, 4009, 4019, 4016, 4020}, {3932, 3959, 3981, 3982, 3983}, {3899, 3928, 3954, 3950, 3950}, {3868, 3895, 3925, 3921, 3920}, {3837, 3866, 3898, 3894, 3892}, {3812, 3841, 3853, 3856, 3862}, {3794, 3818, 3825, 3823, 3822}, {3780, 3799, 3804, 3804, 3803}, {3768, 3787, 3790, 3788, 3788}, {3757, 3779, 3778, 3775, 3776}, {3747, 3772, 3771, 3766, 3765}, {3736, 3763, 3766, 3760, 3746}, {3725, 3749, 3756, 3747, 3729}, {3714, 3718, 3734, 3724, 3706}, {3701, 3703, 3696, 3689, 3668}, {3675, 3695, 3682, 3675, 3662}, {3670, 3691, 3680, 3673, 3661}, {3661, 3686, 3679, 3672, 3656}, {3649, 3680, 3676, 3669, 3641}, {3633, 3669, 3667, 3655, 3606}, {3610, 3647, 3640, 3620, 3560}, {3580, 3607, 3596, 3572, 3501}, {3533, 3548, 3537, 3512, 3425}, {3457, 3468, 3459, 3429, 3324}, {3328, 3348, 3340, 3297, 3172}, {3000, 3000, 3000, 3000, 3000} } }; static struct sf_lut rbatt_sf = { .rows = 29, .cols = 5, /* row_entries are temperature */ .row_entries = {-20, 0, 20, 40, 65}, .percent = {100, 95, 90, 85, 80, 
75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, .sf = { {357, 187, 100, 91, 91}, {400, 208, 105, 94, 94}, {390, 204, 106, 95, 96}, {391, 201, 108, 98, 98}, {391, 202, 110, 98, 100}, {390, 200, 110, 99, 102}, {389, 200, 110, 99, 102}, {393, 202, 101, 93, 100}, {407, 205, 99, 89, 94}, {428, 208, 100, 91, 96}, {455, 212, 102, 92, 98}, {495, 220, 104, 93, 101}, {561, 232, 107, 95, 102}, {634, 245, 112, 98, 98}, {714, 258, 114, 98, 98}, {791, 266, 114, 97, 100}, {871, 289, 108, 95, 97}, {973, 340, 124, 108, 105}, {489, 241, 109, 96, 99}, {511, 246, 110, 96, 99}, {534, 252, 111, 95, 98}, {579, 263, 112, 96, 96}, {636, 276, 111, 95, 97}, {730, 294, 109, 96, 99}, {868, 328, 112, 98, 104}, {1089, 374, 119, 101, 115}, {1559, 457, 128, 105, 213}, {12886, 1026, 637, 422, 3269}, {170899, 127211, 98968, 88907, 77102}, } }; struct bms_battery_data palladium_1500_data = { .fcc = 1500, .fcc_temp_lut = &fcc_temp, .pc_temp_ocv_lut = &pc_temp_ocv, .rbatt_sf_lut = &rbatt_sf, .default_rbatt_mohm = 236, .rbatt_capacitive_mohm = 50, .flat_ocv_threshold_uv = 3800000, };
gpl-2.0
HTCCM9/android_kernel_htc_shooter
arch/arm/mach-at91/board-snapper9260.c
2287
4720
/* * linux/arch/arm/mach-at91/board-snapper9260.c * * Copyright (C) 2010 Bluewater System Ltd * * Author: Andre Renaud <andre@bluewatersys.com> * Author: Ryan Mallon <ryan@bluewatersys.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/i2c/pca953x.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/at91sam9_smc.h> #include "sam9_smc.h" #include "generic.h" #define SNAPPER9260_IO_EXP_GPIO(x) (NR_BUILTIN_GPIO + (x)) static void __init snapper9260_init_early(void) { at91sam9260_initialize(18432000); /* Debug on ttyS0 */ at91_register_uart(0, 0, 0); at91_set_serial_console(0); at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS); at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); at91_register_uart(AT91SAM9260_ID_US2, 3, 0); } static void __init snapper9260_init_irq(void) { at91sam9260_init_interrupts(NULL); } static struct at91_usbh_data __initdata snapper9260_usbh_data = { .ports = 2, }; static struct at91_udc_data __initdata snapper9260_udc_data = { .vbus_pin = SNAPPER9260_IO_EXP_GPIO(5), .vbus_active_low = 1, .vbus_polled = 1, }; static struct at91_eth_data snapper9260_macb_data 
= { .is_rmii = 1, }; static struct mtd_partition __initdata snapper9260_nand_partitions[] = { { .name = "Preboot", .offset = 0, .size = SZ_128K, }, { .name = "Bootloader", .offset = MTDPART_OFS_APPEND, .size = SZ_256K, }, { .name = "Environment", .offset = MTDPART_OFS_APPEND, .size = SZ_128K, }, { .name = "Kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_4M, }, { .name = "Filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct mtd_partition * __init snapper9260_nand_partition_info(int size, int *num_partitions) { *num_partitions = ARRAY_SIZE(snapper9260_nand_partitions); return snapper9260_nand_partitions; } static struct atmel_nand_data __initdata snapper9260_nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PC13, .partition_info = snapper9260_nand_partition_info, .bus_width_16 = 0, }; static struct sam9_smc_config __initdata snapper9260_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 0, .ncs_write_setup = 0, .nwe_setup = 0, .ncs_read_pulse = 5, .nrd_pulse = 2, .ncs_write_pulse = 5, .nwe_pulse = 2, .read_cycle = 7, .write_cycle = 7, .mode = (AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE), .tdf_cycles = 1, }; static struct pca953x_platform_data snapper9260_io_expander_data = { .gpio_base = SNAPPER9260_IO_EXP_GPIO(0), }; static struct i2c_board_info __initdata snapper9260_i2c_devices[] = { { /* IO expander */ I2C_BOARD_INFO("max7312", 0x28), .platform_data = &snapper9260_io_expander_data, }, { /* Audio codec */ I2C_BOARD_INFO("tlv320aic23", 0x1a), }, { /* RTC */ I2C_BOARD_INFO("isl1208", 0x6f), .irq = gpio_to_irq(AT91_PIN_PA31), }, }; static void __init snapper9260_add_device_nand(void) { at91_set_A_periph(AT91_PIN_PC14, 0); sam9_smc_configure(3, &snapper9260_nand_smc_config); at91_add_device_nand(&snapper9260_nand_data); } static void __init snapper9260_board_init(void) { at91_add_device_i2c(snapper9260_i2c_devices, ARRAY_SIZE(snapper9260_i2c_devices)); at91_add_device_serial(); 
at91_add_device_usbh(&snapper9260_usbh_data); at91_add_device_udc(&snapper9260_udc_data); at91_add_device_eth(&snapper9260_macb_data); at91_add_device_ssc(AT91SAM9260_ID_SSC, (ATMEL_SSC_TF | ATMEL_SSC_TK | ATMEL_SSC_TD | ATMEL_SSC_RD)); snapper9260_add_device_nand(); } MACHINE_START(SNAPPER_9260, "Bluewater Systems Snapper 9260/9G20 module") .timer = &at91sam926x_timer, .map_io = at91sam9260_map_io, .init_early = snapper9260_init_early, .init_irq = snapper9260_init_irq, .init_machine = snapper9260_board_init, MACHINE_END
gpl-2.0
crewrktablets/android_kernel_odys_RK30_3.0.8
net/sched/act_skbedit.c
2543
5837
/* * Copyright (c) 2008, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Author: Alexander Duyck <alexander.h.duyck@intel.com> */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <linux/tc_act/tc_skbedit.h> #include <net/tc_act/tc_skbedit.h> #define SKBEDIT_TAB_MASK 15 static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1]; static u32 skbedit_idx_gen; static DEFINE_RWLOCK(skbedit_lock); static struct tcf_hashinfo skbedit_hash_info = { .htab = tcf_skbedit_ht, .hmask = SKBEDIT_TAB_MASK, .lock = &skbedit_lock, }; static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) { struct tcf_skbedit *d = a->priv; spin_lock(&d->tcf_lock); d->tcf_tm.lastuse = jiffies; bstats_update(&d->tcf_bstats, skb); if (d->flags & SKBEDIT_F_PRIORITY) skb->priority = d->priority; if (d->flags & SKBEDIT_F_QUEUE_MAPPING && skb->dev->real_num_tx_queues > d->queue_mapping) skb_set_queue_mapping(skb, d->queue_mapping); if (d->flags & SKBEDIT_F_MARK) skb->mark = d->mark; spin_unlock(&d->tcf_lock); return d->tcf_action; } static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = { [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) }, [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) }, 
[TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) }, [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) }, }; static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind) { struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; struct tc_skbedit *parm; struct tcf_skbedit *d; struct tcf_common *pc; u32 flags = 0, *priority = NULL, *mark = NULL; u16 *queue_mapping = NULL; int ret = 0, err; if (nla == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy); if (err < 0) return err; if (tb[TCA_SKBEDIT_PARMS] == NULL) return -EINVAL; if (tb[TCA_SKBEDIT_PRIORITY] != NULL) { flags |= SKBEDIT_F_PRIORITY; priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]); } if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) { flags |= SKBEDIT_F_QUEUE_MAPPING; queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); } if (tb[TCA_SKBEDIT_MARK] != NULL) { flags |= SKBEDIT_F_MARK; mark = nla_data(tb[TCA_SKBEDIT_MARK]); } if (!flags) return -EINVAL; parm = nla_data(tb[TCA_SKBEDIT_PARMS]); pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info); if (!pc) { pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, &skbedit_idx_gen, &skbedit_hash_info); if (IS_ERR(pc)) return PTR_ERR(pc); d = to_skbedit(pc); ret = ACT_P_CREATED; } else { d = to_skbedit(pc); if (!ovr) { tcf_hash_release(pc, bind, &skbedit_hash_info); return -EEXIST; } } spin_lock_bh(&d->tcf_lock); d->flags = flags; if (flags & SKBEDIT_F_PRIORITY) d->priority = *priority; if (flags & SKBEDIT_F_QUEUE_MAPPING) d->queue_mapping = *queue_mapping; if (flags & SKBEDIT_F_MARK) d->mark = *mark; d->tcf_action = parm->action; spin_unlock_bh(&d->tcf_lock); if (ret == ACT_P_CREATED) tcf_hash_insert(pc, &skbedit_hash_info); return ret; } static int tcf_skbedit_cleanup(struct tc_action *a, int bind) { struct tcf_skbedit *d = a->priv; if (d) return tcf_hash_release(&d->common, bind, &skbedit_hash_info); return 0; } static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, int bind, int 
ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_skbedit *d = a->priv; struct tc_skbedit opt = { .index = d->tcf_index, .refcnt = d->tcf_refcnt - ref, .bindcnt = d->tcf_bindcnt - bind, .action = d->tcf_action, }; struct tcf_t t; NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); if (d->flags & SKBEDIT_F_PRIORITY) NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), &d->priority); if (d->flags & SKBEDIT_F_QUEUE_MAPPING) NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, sizeof(d->queue_mapping), &d->queue_mapping); if (d->flags & SKBEDIT_F_MARK) NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), &d->mark); t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(d->tcf_tm.expires); NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t); return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .hinfo = &skbedit_hash_info, .type = TCA_ACT_SKBEDIT, .capab = TCA_CAP_NONE, .owner = THIS_MODULE, .act = tcf_skbedit, .dump = tcf_skbedit_dump, .cleanup = tcf_skbedit_cleanup, .init = tcf_skbedit_init, .walk = tcf_generic_walker, }; MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>"); MODULE_DESCRIPTION("SKB Editing"); MODULE_LICENSE("GPL"); static int __init skbedit_init_module(void) { return tcf_register_action(&act_skbedit_ops); } static void __exit skbedit_cleanup_module(void) { tcf_unregister_action(&act_skbedit_ops); } module_init(skbedit_init_module); module_exit(skbedit_cleanup_module);
gpl-2.0
clemsyn/Grouper
arch/arm/mach-omap1/clock.c
3055
13921
/* * linux/arch/arm/mach-omap1/clock.c * * Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * * Modified to use omap shared clock framework by * Tony Lindgren <tony@atomide.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/clkdev.h> #include <asm/mach-types.h> #include <plat/cpu.h> #include <plat/usb.h> #include <plat/clock.h> #include <plat/sram.h> #include <plat/clkdev_omap.h> #include "clock.h" #include "opp.h" __u32 arm_idlect1_mask; struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p; /* * Omap1 specific clock functions */ unsigned long omap1_uart_recalc(struct clk *clk) { unsigned int val = __raw_readl(clk->enable_reg); return val & clk->enable_bit ? 
48000000 : 12000000; } unsigned long omap1_sossi_recalc(struct clk *clk) { u32 div = omap_readl(MOD_CONF_CTRL_1); div = (div >> 17) & 0x7; div++; return clk->parent->rate / div; } static void omap1_clk_allow_idle(struct clk *clk) { struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk; if (!(clk->flags & CLOCK_IDLE_CONTROL)) return; if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count)) arm_idlect1_mask |= 1 << iclk->idlect_shift; } static void omap1_clk_deny_idle(struct clk *clk) { struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk; if (!(clk->flags & CLOCK_IDLE_CONTROL)) return; if (iclk->no_idle_count++ == 0) arm_idlect1_mask &= ~(1 << iclk->idlect_shift); } static __u16 verify_ckctl_value(__u16 newval) { /* This function checks for following limitations set * by the hardware (all conditions must be true): * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2 * ARM_CK >= TC_CK * DSP_CK >= TC_CK * DSPMMU_CK >= TC_CK * * In addition following rules are enforced: * LCD_CK <= TC_CK * ARMPER_CK <= TC_CK * * However, maximum frequencies are not checked for! 
*/ __u8 per_exp; __u8 lcd_exp; __u8 arm_exp; __u8 dsp_exp; __u8 tc_exp; __u8 dspmmu_exp; per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3; lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3; arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3; dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3; tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3; dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3; if (dspmmu_exp < dsp_exp) dspmmu_exp = dsp_exp; if (dspmmu_exp > dsp_exp+1) dspmmu_exp = dsp_exp+1; if (tc_exp < arm_exp) tc_exp = arm_exp; if (tc_exp < dspmmu_exp) tc_exp = dspmmu_exp; if (tc_exp > lcd_exp) lcd_exp = tc_exp; if (tc_exp > per_exp) per_exp = tc_exp; newval &= 0xf000; newval |= per_exp << CKCTL_PERDIV_OFFSET; newval |= lcd_exp << CKCTL_LCDDIV_OFFSET; newval |= arm_exp << CKCTL_ARMDIV_OFFSET; newval |= dsp_exp << CKCTL_DSPDIV_OFFSET; newval |= tc_exp << CKCTL_TCDIV_OFFSET; newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET; return newval; } static int calc_dsor_exp(struct clk *clk, unsigned long rate) { /* Note: If target frequency is too low, this function will return 4, * which is invalid value. Caller must check for this value and act * accordingly. 
* * Note: This function does not check for following limitations set * by the hardware (all conditions must be true): * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2 * ARM_CK >= TC_CK * DSP_CK >= TC_CK * DSPMMU_CK >= TC_CK */ unsigned long realrate; struct clk * parent; unsigned dsor_exp; parent = clk->parent; if (unlikely(parent == NULL)) return -EIO; realrate = parent->rate; for (dsor_exp=0; dsor_exp<4; dsor_exp++) { if (realrate <= rate) break; realrate /= 2; } return dsor_exp; } unsigned long omap1_ckctl_recalc(struct clk *clk) { /* Calculate divisor encoded as 2-bit exponent */ int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset)); return clk->parent->rate / dsor; } unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk) { int dsor; /* Calculate divisor encoded as 2-bit exponent * * The clock control bits are in DSP domain, * so api_ck is needed for access. * Note that DSP_CKCTL virt addr = phys addr, so * we must use __raw_readw() instead of omap_readw(). */ omap1_clk_enable(api_ck_p); dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset)); omap1_clk_disable(api_ck_p); return clk->parent->rate / dsor; } /* MPU virtual clock functions */ int omap1_select_table_rate(struct clk *clk, unsigned long rate) { /* Find the highest supported frequency <= rate and switch to it */ struct mpu_rate * ptr; unsigned long dpll1_rate, ref_rate; dpll1_rate = ck_dpll1_p->rate; ref_rate = ck_ref_p->rate; for (ptr = omap1_rate_table; ptr->rate; ptr++) { if (ptr->xtal != ref_rate) continue; /* DPLL1 cannot be reprogrammed without risking system crash */ if (likely(dpll1_rate != 0) && ptr->pll_rate != dpll1_rate) continue; /* Can check only after xtal frequency check */ if (ptr->rate <= rate) break; } if (!ptr->rate) return -EINVAL; /* * In most cases we should not need to reprogram DPLL. * Reprogramming the DPLL is tricky, it must be done from SRAM. 
* (on 730, bit 13 must always be 1) */ if (cpu_is_omap7xx()) omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val | 0x2000); else omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val); /* XXX Do we need to recalculate the tree below DPLL1 at this point? */ ck_dpll1_p->rate = ptr->pll_rate; return 0; } int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate) { int dsor_exp; u16 regval; dsor_exp = calc_dsor_exp(clk, rate); if (dsor_exp > 3) dsor_exp = -EINVAL; if (dsor_exp < 0) return dsor_exp; regval = __raw_readw(DSP_CKCTL); regval &= ~(3 << clk->rate_offset); regval |= dsor_exp << clk->rate_offset; __raw_writew(regval, DSP_CKCTL); clk->rate = clk->parent->rate / (1 << dsor_exp); return 0; } long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate) { int dsor_exp = calc_dsor_exp(clk, rate); if (dsor_exp < 0) return dsor_exp; if (dsor_exp > 3) dsor_exp = 3; return clk->parent->rate / (1 << dsor_exp); } int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate) { int dsor_exp; u16 regval; dsor_exp = calc_dsor_exp(clk, rate); if (dsor_exp > 3) dsor_exp = -EINVAL; if (dsor_exp < 0) return dsor_exp; regval = omap_readw(ARM_CKCTL); regval &= ~(3 << clk->rate_offset); regval |= dsor_exp << clk->rate_offset; regval = verify_ckctl_value(regval); omap_writew(regval, ARM_CKCTL); clk->rate = clk->parent->rate / (1 << dsor_exp); return 0; } long omap1_round_to_table_rate(struct clk *clk, unsigned long rate) { /* Find the highest supported frequency <= rate */ struct mpu_rate * ptr; long highest_rate; unsigned long ref_rate; ref_rate = ck_ref_p->rate; highest_rate = -EINVAL; for (ptr = omap1_rate_table; ptr->rate; ptr++) { if (ptr->xtal != ref_rate) continue; highest_rate = ptr->rate; /* Can check only after xtal frequency check */ if (ptr->rate <= rate) break; } return highest_rate; } static unsigned calc_ext_dsor(unsigned long rate) { unsigned dsor; /* MCLK and BCLK divisor selection is not linear: * freq = 96MHz / dsor * * 
RATIO_SEL range: dsor <-> RATIO_SEL * 0..6: (RATIO_SEL+2) <-> (dsor-2) * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6) * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9 * can not be used. */ for (dsor = 2; dsor < 96; ++dsor) { if ((dsor & 1) && dsor > 8) continue; if (rate >= 96000000 / dsor) break; } return dsor; } /* XXX Only needed on 1510 */ int omap1_set_uart_rate(struct clk *clk, unsigned long rate) { unsigned int val; val = __raw_readl(clk->enable_reg); if (rate == 12000000) val &= ~(1 << clk->enable_bit); else if (rate == 48000000) val |= (1 << clk->enable_bit); else return -EINVAL; __raw_writel(val, clk->enable_reg); clk->rate = rate; return 0; } /* External clock (MCLK & BCLK) functions */ int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate) { unsigned dsor; __u16 ratio_bits; dsor = calc_ext_dsor(rate); clk->rate = 96000000 / dsor; if (dsor > 8) ratio_bits = ((dsor - 8) / 2 + 6) << 2; else ratio_bits = (dsor - 2) << 2; ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd; __raw_writew(ratio_bits, clk->enable_reg); return 0; } int omap1_set_sossi_rate(struct clk *clk, unsigned long rate) { u32 l; int div; unsigned long p_rate; p_rate = clk->parent->rate; /* Round towards slower frequency */ div = (p_rate + rate - 1) / rate; div--; if (div < 0 || div > 7) return -EINVAL; l = omap_readl(MOD_CONF_CTRL_1); l &= ~(7 << 17); l |= div << 17; omap_writel(l, MOD_CONF_CTRL_1); clk->rate = p_rate / (div + 1); return 0; } long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate) { return 96000000 / calc_ext_dsor(rate); } void omap1_init_ext_clk(struct clk *clk) { unsigned dsor; __u16 ratio_bits; /* Determine current rate and ensure clock is based on 96MHz APLL */ ratio_bits = __raw_readw(clk->enable_reg) & ~1; __raw_writew(ratio_bits, clk->enable_reg); ratio_bits = (ratio_bits & 0xfc) >> 2; if (ratio_bits > 6) dsor = (ratio_bits - 6) * 2 + 8; else dsor = ratio_bits + 2; clk-> rate = 96000000 / dsor; } int omap1_clk_enable(struct clk 
*clk) { int ret = 0; if (clk->usecount++ == 0) { if (clk->parent) { ret = omap1_clk_enable(clk->parent); if (ret) goto err; if (clk->flags & CLOCK_NO_IDLE_PARENT) omap1_clk_deny_idle(clk->parent); } ret = clk->ops->enable(clk); if (ret) { if (clk->parent) omap1_clk_disable(clk->parent); goto err; } } return ret; err: clk->usecount--; return ret; } void omap1_clk_disable(struct clk *clk) { if (clk->usecount > 0 && !(--clk->usecount)) { clk->ops->disable(clk); if (likely(clk->parent)) { omap1_clk_disable(clk->parent); if (clk->flags & CLOCK_NO_IDLE_PARENT) omap1_clk_allow_idle(clk->parent); } } } static int omap1_clk_enable_generic(struct clk *clk) { __u16 regval16; __u32 regval32; if (unlikely(clk->enable_reg == NULL)) { printk(KERN_ERR "clock.c: Enable for %s without enable code\n", clk->name); return -EINVAL; } if (clk->flags & ENABLE_REG_32BIT) { regval32 = __raw_readl(clk->enable_reg); regval32 |= (1 << clk->enable_bit); __raw_writel(regval32, clk->enable_reg); } else { regval16 = __raw_readw(clk->enable_reg); regval16 |= (1 << clk->enable_bit); __raw_writew(regval16, clk->enable_reg); } return 0; } static void omap1_clk_disable_generic(struct clk *clk) { __u16 regval16; __u32 regval32; if (clk->enable_reg == NULL) return; if (clk->flags & ENABLE_REG_32BIT) { regval32 = __raw_readl(clk->enable_reg); regval32 &= ~(1 << clk->enable_bit); __raw_writel(regval32, clk->enable_reg); } else { regval16 = __raw_readw(clk->enable_reg); regval16 &= ~(1 << clk->enable_bit); __raw_writew(regval16, clk->enable_reg); } } const struct clkops clkops_generic = { .enable = omap1_clk_enable_generic, .disable = omap1_clk_disable_generic, }; static int omap1_clk_enable_dsp_domain(struct clk *clk) { int retval; retval = omap1_clk_enable(api_ck_p); if (!retval) { retval = omap1_clk_enable_generic(clk); omap1_clk_disable(api_ck_p); } return retval; } static void omap1_clk_disable_dsp_domain(struct clk *clk) { if (omap1_clk_enable(api_ck_p) == 0) { omap1_clk_disable_generic(clk); 
omap1_clk_disable(api_ck_p); } } const struct clkops clkops_dspck = { .enable = omap1_clk_enable_dsp_domain, .disable = omap1_clk_disable_dsp_domain, }; /* XXX SYSC register handling does not belong in the clock framework */ static int omap1_clk_enable_uart_functional_16xx(struct clk *clk) { int ret; struct uart_clk *uclk; ret = omap1_clk_enable_generic(clk); if (ret == 0) { /* Set smart idle acknowledgement mode */ uclk = (struct uart_clk *)clk; omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8, uclk->sysc_addr); } return ret; } /* XXX SYSC register handling does not belong in the clock framework */ static void omap1_clk_disable_uart_functional_16xx(struct clk *clk) { struct uart_clk *uclk; /* Set force idle acknowledgement mode */ uclk = (struct uart_clk *)clk; omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr); omap1_clk_disable_generic(clk); } /* XXX SYSC register handling does not belong in the clock framework */ const struct clkops clkops_uart_16xx = { .enable = omap1_clk_enable_uart_functional_16xx, .disable = omap1_clk_disable_uart_functional_16xx, }; long omap1_clk_round_rate(struct clk *clk, unsigned long rate) { if (clk->round_rate != NULL) return clk->round_rate(clk, rate); return clk->rate; } int omap1_clk_set_rate(struct clk *clk, unsigned long rate) { int ret = -EINVAL; if (clk->set_rate) ret = clk->set_rate(clk, rate); return ret; } /* * Omap1 clock reset and init functions */ #ifdef CONFIG_OMAP_RESET_CLOCKS void omap1_clk_disable_unused(struct clk *clk) { __u32 regval32; /* Clocks in the DSP domain need api_ck. Just assume bootloader * has not enabled any DSP clocks */ if (clk->enable_reg == DSP_IDLECT2) { printk(KERN_INFO "Skipping reset check for DSP domain " "clock \"%s\"\n", clk->name); return; } /* Is the clock already disabled? 
*/ if (clk->flags & ENABLE_REG_32BIT) regval32 = __raw_readl(clk->enable_reg); else regval32 = __raw_readw(clk->enable_reg); if ((regval32 & (1 << clk->enable_bit)) == 0) return; printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name); clk->ops->disable(clk); printk(" done\n"); } #endif
gpl-2.0
ztemt/NX404H_kernel
drivers/bcma/main.c
3311
9131
/* * Broadcom specific AMBA * Bus subsystem * * Licensed under the GNU/GPL. See COPYING for details. */ #include "bcma_private.h" #include <linux/module.h> #include <linux/bcma/bcma.h> #include <linux/slab.h> MODULE_DESCRIPTION("Broadcom's specific AMBA driver"); MODULE_LICENSE("GPL"); /* contains the number the next bus should get. */ static unsigned int bcma_bus_next_num = 0; /* bcma_buses_mutex locks the bcma_bus_next_num */ static DEFINE_MUTEX(bcma_buses_mutex); static int bcma_bus_match(struct device *dev, struct device_driver *drv); static int bcma_device_probe(struct device *dev); static int bcma_device_remove(struct device *dev); static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env); static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); return sprintf(buf, "0x%03X\n", core->id.manuf); } static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); return sprintf(buf, "0x%03X\n", core->id.id); } static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); return sprintf(buf, "0x%02X\n", core->id.rev); } static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); return sprintf(buf, "0x%X\n", core->id.class); } static struct device_attribute bcma_device_attrs[] = { __ATTR_RO(manuf), __ATTR_RO(id), __ATTR_RO(rev), __ATTR_RO(class), __ATTR_NULL, }; static struct bus_type bcma_bus_type = { .name = "bcma", .match = bcma_bus_match, .probe = bcma_device_probe, .remove = bcma_device_remove, .uevent = bcma_device_uevent, .dev_attrs = bcma_device_attrs, }; struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid) { struct bcma_device 
*core; list_for_each_entry(core, &bus->cores, list) { if (core->id.id == coreid) return core; } return NULL; } EXPORT_SYMBOL_GPL(bcma_find_core); static void bcma_release_core_dev(struct device *dev) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); if (core->io_addr) iounmap(core->io_addr); if (core->io_wrap) iounmap(core->io_wrap); kfree(core); } static int bcma_register_cores(struct bcma_bus *bus) { struct bcma_device *core; int err, dev_id = 0; list_for_each_entry(core, &bus->cores, list) { /* We support that cores ourself */ switch (core->id.id) { case BCMA_CORE_CHIPCOMMON: case BCMA_CORE_PCI: case BCMA_CORE_PCIE: case BCMA_CORE_MIPS_74K: continue; } core->dev.release = bcma_release_core_dev; core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id); switch (bus->hosttype) { case BCMA_HOSTTYPE_PCI: core->dev.parent = &bus->host_pci->dev; core->dma_dev = &bus->host_pci->dev; core->irq = bus->host_pci->irq; break; case BCMA_HOSTTYPE_SOC: core->dev.dma_mask = &core->dev.coherent_dma_mask; core->dma_dev = &core->dev; break; case BCMA_HOSTTYPE_SDIO: break; } err = device_register(&core->dev); if (err) { pr_err("Could not register dev for core 0x%03X\n", core->id.id); continue; } core->dev_registered = true; dev_id++; } return 0; } static void bcma_unregister_cores(struct bcma_bus *bus) { struct bcma_device *core; list_for_each_entry(core, &bus->cores, list) { if (core->dev_registered) device_unregister(&core->dev); } } int __devinit bcma_bus_register(struct bcma_bus *bus) { int err; struct bcma_device *core; mutex_lock(&bcma_buses_mutex); bus->num = bcma_bus_next_num++; mutex_unlock(&bcma_buses_mutex); /* Scan for devices (cores) */ err = bcma_bus_scan(bus); if (err) { pr_err("Failed to scan: %d\n", err); return -1; } /* Init CC core */ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); if (core) { bus->drv_cc.core = core; bcma_core_chipcommon_init(&bus->drv_cc); } /* Init MIPS core */ core = bcma_find_core(bus, 
BCMA_CORE_MIPS_74K); if (core) { bus->drv_mips.core = core; bcma_core_mips_init(&bus->drv_mips); } /* Init PCIE core */ core = bcma_find_core(bus, BCMA_CORE_PCIE); if (core) { bus->drv_pci.core = core; bcma_core_pci_init(&bus->drv_pci); } /* Try to get SPROM */ err = bcma_sprom_get(bus); if (err == -ENOENT) { pr_err("No SPROM available\n"); } else if (err) pr_err("Failed to get SPROM: %d\n", err); /* Register found cores */ bcma_register_cores(bus); pr_info("Bus registered\n"); return 0; } void bcma_bus_unregister(struct bcma_bus *bus) { bcma_unregister_cores(bus); } int __init bcma_bus_early_register(struct bcma_bus *bus, struct bcma_device *core_cc, struct bcma_device *core_mips) { int err; struct bcma_device *core; struct bcma_device_id match; bcma_init_bus(bus); match.manuf = BCMA_MANUF_BCM; match.id = BCMA_CORE_CHIPCOMMON; match.class = BCMA_CL_SIM; match.rev = BCMA_ANY_REV; /* Scan for chip common core */ err = bcma_bus_scan_early(bus, &match, core_cc); if (err) { pr_err("Failed to scan for common core: %d\n", err); return -1; } match.manuf = BCMA_MANUF_MIPS; match.id = BCMA_CORE_MIPS_74K; match.class = BCMA_CL_SIM; match.rev = BCMA_ANY_REV; /* Scan for mips core */ err = bcma_bus_scan_early(bus, &match, core_mips); if (err) { pr_err("Failed to scan for mips core: %d\n", err); return -1; } /* Init CC core */ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); if (core) { bus->drv_cc.core = core; bcma_core_chipcommon_init(&bus->drv_cc); } /* Init MIPS core */ core = bcma_find_core(bus, BCMA_CORE_MIPS_74K); if (core) { bus->drv_mips.core = core; bcma_core_mips_init(&bus->drv_mips); } pr_info("Early bus registered\n"); return 0; } #ifdef CONFIG_PM int bcma_bus_suspend(struct bcma_bus *bus) { struct bcma_device *core; list_for_each_entry(core, &bus->cores, list) { struct device_driver *drv = core->dev.driver; if (drv) { struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); if (adrv->suspend) adrv->suspend(core); } } return 0; } int 
bcma_bus_resume(struct bcma_bus *bus) { struct bcma_device *core; /* Init CC core */ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON); if (core) { bus->drv_cc.setup_done = false; bcma_core_chipcommon_init(&bus->drv_cc); } list_for_each_entry(core, &bus->cores, list) { struct device_driver *drv = core->dev.driver; if (drv) { struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); if (adrv->resume) adrv->resume(core); } } return 0; } #endif int __bcma_driver_register(struct bcma_driver *drv, struct module *owner) { drv->drv.name = drv->name; drv->drv.bus = &bcma_bus_type; drv->drv.owner = owner; return driver_register(&drv->drv); } EXPORT_SYMBOL_GPL(__bcma_driver_register); void bcma_driver_unregister(struct bcma_driver *drv) { driver_unregister(&drv->drv); } EXPORT_SYMBOL_GPL(bcma_driver_unregister); static int bcma_bus_match(struct device *dev, struct device_driver *drv) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); const struct bcma_device_id *cid = &core->id; const struct bcma_device_id *did; for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) { if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) && (did->id == cid->id || did->id == BCMA_ANY_ID) && (did->rev == cid->rev || did->rev == BCMA_ANY_REV) && (did->class == cid->class || did->class == BCMA_ANY_CLASS)) return 1; } return 0; } static int bcma_device_probe(struct device *dev) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver, drv); int err = 0; if (adrv->probe) err = adrv->probe(core); return err; } static int bcma_device_remove(struct device *dev) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver, drv); if (adrv->remove) adrv->remove(core); return 0; } static int 
bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env) { struct bcma_device *core = container_of(dev, struct bcma_device, dev); return add_uevent_var(env, "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X", core->id.manuf, core->id.id, core->id.rev, core->id.class); } static int __init bcma_modinit(void) { int err; err = bus_register(&bcma_bus_type); if (err) return err; #ifdef CONFIG_BCMA_HOST_PCI err = bcma_host_pci_init(); if (err) { pr_err("PCI host initialization failed\n"); err = 0; } #endif return err; } fs_initcall(bcma_modinit); static void __exit bcma_modexit(void) { #ifdef CONFIG_BCMA_HOST_PCI bcma_host_pci_exit(); #endif bus_unregister(&bcma_bus_type); } module_exit(bcma_modexit)
gpl-2.0
CyanogenMod/android_kernel_sony_flamingo
drivers/hwmon/w83627hf.c
4847
56094
/* * w83627hf.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (c) 1998 - 2003 Frodo Looijaard <frodol@dds.nl>, * Philip Edelbrock <phil@netroedge.com>, * and Mark Studebaker <mdsxyz123@yahoo.com> * Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org> * Copyright (c) 2007 Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Supports following chips: * * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC) * w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) * w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC) * w83687thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) * w83697hf 8 2 2 2 0x60 0x5ca3 no yes(LPC) * * For other winbond chips, and for i2c support in the above chips, * use w83781d.c. * * Note: automatic ("cruise") fan control for 697, 637 & 627thf not * supported yet. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/io.h> #include "lm75.h" static struct platform_device *pdev; #define DRVNAME "w83627hf" enum chips { w83627hf, w83627thf, w83697hf, w83637hf, w83687thf }; struct w83627hf_sio_data { enum chips type; int sioaddr; }; static u8 force_i2c = 0x1f; module_param(force_i2c, byte, 0); MODULE_PARM_DESC(force_i2c, "Initialize the i2c address of the sensors"); static bool init = 1; module_param(init, bool, 0); MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization"); static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); /* modified from kernel/include/traps.c */ #define DEV 0x07 /* Register: Logical device select */ /* logical device numbers for superio_select (below) */ #define W83627HF_LD_FDC 0x00 #define W83627HF_LD_PRT 0x01 #define W83627HF_LD_UART1 0x02 #define W83627HF_LD_UART2 0x03 #define W83627HF_LD_KBC 0x05 #define W83627HF_LD_CIR 0x06 /* w83627hf only */ #define W83627HF_LD_GAME 0x07 #define W83627HF_LD_MIDI 0x07 #define W83627HF_LD_GPIO1 0x07 #define W83627HF_LD_GPIO5 0x07 /* w83627thf only */ #define W83627HF_LD_GPIO2 0x08 #define W83627HF_LD_GPIO3 0x09 #define W83627HF_LD_GPIO4 0x09 /* w83627thf only */ #define W83627HF_LD_ACPI 0x0a #define W83627HF_LD_HWM 0x0b #define DEVID 0x20 /* Register: Device ID */ #define W83627THF_GPIO5_EN 0x30 /* w83627thf only */ #define W83627THF_GPIO5_IOSR 0xf3 /* w83627thf only */ #define W83627THF_GPIO5_DR 0xf4 /* w83627thf only */ #define W83687THF_VID_EN 0x29 /* w83687thf only */ #define W83687THF_VID_CFG 0xF0 /* w83687thf only */ #define W83687THF_VID_DATA 0xF1 /* 
w83687thf only */ static inline void superio_outb(struct w83627hf_sio_data *sio, int reg, int val) { outb(reg, sio->sioaddr); outb(val, sio->sioaddr + 1); } static inline int superio_inb(struct w83627hf_sio_data *sio, int reg) { outb(reg, sio->sioaddr); return inb(sio->sioaddr + 1); } static inline void superio_select(struct w83627hf_sio_data *sio, int ld) { outb(DEV, sio->sioaddr); outb(ld, sio->sioaddr + 1); } static inline void superio_enter(struct w83627hf_sio_data *sio) { outb(0x87, sio->sioaddr); outb(0x87, sio->sioaddr); } static inline void superio_exit(struct w83627hf_sio_data *sio) { outb(0xAA, sio->sioaddr); } #define W627_DEVID 0x52 #define W627THF_DEVID 0x82 #define W697_DEVID 0x60 #define W637_DEVID 0x70 #define W687THF_DEVID 0x85 #define WINB_ACT_REG 0x30 #define WINB_BASE_REG 0x60 /* Constants specified below */ /* Alignment of the base address */ #define WINB_ALIGNMENT ~7 /* Offset & size of I/O region we are interested in */ #define WINB_REGION_OFFSET 5 #define WINB_REGION_SIZE 2 /* Where are the sensors address/data registers relative to the region offset */ #define W83781D_ADDR_REG_OFFSET 0 #define W83781D_DATA_REG_OFFSET 1 /* The W83781D registers */ /* The W83782D registers for nr=7,8 are in bank 5 */ #define W83781D_REG_IN_MAX(nr) ((nr < 7) ? (0x2b + (nr) * 2) : \ (0x554 + (((nr) - 7) * 2))) #define W83781D_REG_IN_MIN(nr) ((nr < 7) ? (0x2c + (nr) * 2) : \ (0x555 + (((nr) - 7) * 2))) #define W83781D_REG_IN(nr) ((nr < 7) ? 
(0x20 + (nr)) : \ (0x550 + (nr) - 7)) /* nr:0-2 for fans:1-3 */ #define W83627HF_REG_FAN_MIN(nr) (0x3b + (nr)) #define W83627HF_REG_FAN(nr) (0x28 + (nr)) #define W83627HF_REG_TEMP2_CONFIG 0x152 #define W83627HF_REG_TEMP3_CONFIG 0x252 /* these are zero-based, unlike config constants above */ static const u16 w83627hf_reg_temp[] = { 0x27, 0x150, 0x250 }; static const u16 w83627hf_reg_temp_hyst[] = { 0x3A, 0x153, 0x253 }; static const u16 w83627hf_reg_temp_over[] = { 0x39, 0x155, 0x255 }; #define W83781D_REG_BANK 0x4E #define W83781D_REG_CONFIG 0x40 #define W83781D_REG_ALARM1 0x459 #define W83781D_REG_ALARM2 0x45A #define W83781D_REG_ALARM3 0x45B #define W83781D_REG_BEEP_CONFIG 0x4D #define W83781D_REG_BEEP_INTS1 0x56 #define W83781D_REG_BEEP_INTS2 0x57 #define W83781D_REG_BEEP_INTS3 0x453 #define W83781D_REG_VID_FANDIV 0x47 #define W83781D_REG_CHIPID 0x49 #define W83781D_REG_WCHIPID 0x58 #define W83781D_REG_CHIPMAN 0x4F #define W83781D_REG_PIN 0x4B #define W83781D_REG_VBAT 0x5D #define W83627HF_REG_PWM1 0x5A #define W83627HF_REG_PWM2 0x5B static const u8 W83627THF_REG_PWM_ENABLE[] = { 0x04, /* FAN 1 mode */ 0x04, /* FAN 2 mode */ 0x12, /* FAN AUX mode */ }; static const u8 W83627THF_PWM_ENABLE_SHIFT[] = { 2, 4, 1 }; #define W83627THF_REG_PWM1 0x01 /* 697HF/637HF/687THF too */ #define W83627THF_REG_PWM2 0x03 /* 697HF/637HF/687THF too */ #define W83627THF_REG_PWM3 0x11 /* 637HF/687THF too */ #define W83627THF_REG_VRM_OVT_CFG 0x18 /* 637HF/687THF too */ static const u8 regpwm_627hf[] = { W83627HF_REG_PWM1, W83627HF_REG_PWM2 }; static const u8 regpwm[] = { W83627THF_REG_PWM1, W83627THF_REG_PWM2, W83627THF_REG_PWM3 }; #define W836X7HF_REG_PWM(type, nr) (((type) == w83627hf) ? 
\ regpwm_627hf[nr] : regpwm[nr]) #define W83627HF_REG_PWM_FREQ 0x5C /* Only for the 627HF */ #define W83637HF_REG_PWM_FREQ1 0x00 /* 697HF/687THF too */ #define W83637HF_REG_PWM_FREQ2 0x02 /* 697HF/687THF too */ #define W83637HF_REG_PWM_FREQ3 0x10 /* 687THF too */ static const u8 W83637HF_REG_PWM_FREQ[] = { W83637HF_REG_PWM_FREQ1, W83637HF_REG_PWM_FREQ2, W83637HF_REG_PWM_FREQ3 }; #define W83627HF_BASE_PWM_FREQ 46870 #define W83781D_REG_I2C_ADDR 0x48 #define W83781D_REG_I2C_SUBADDR 0x4A /* Sensor selection */ #define W83781D_REG_SCFG1 0x5D static const u8 BIT_SCFG1[] = { 0x02, 0x04, 0x08 }; #define W83781D_REG_SCFG2 0x59 static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 }; #define W83781D_DEFAULT_BETA 3435 /* * Conversions. Limit checking is only done on the TO_REG * variants. Note that you should be a bit careful with which arguments * these macros are called: arguments may be evaluated more than once. * Fixing this is just not worth it. */ #define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8)/16),0,255)) #define IN_FROM_REG(val) ((val) * 16) static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm == 0) return 255; rpm = SENSORS_LIMIT(rpm, 1, 1000000); return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254); } #define TEMP_MIN (-128000) #define TEMP_MAX ( 127000) /* * TEMP: 0.001C/bit (-128C to +127C) * REG: 1C/bit, two's complement */ static u8 TEMP_TO_REG(long temp) { int ntemp = SENSORS_LIMIT(temp, TEMP_MIN, TEMP_MAX); ntemp += (ntemp<0 ? 
-500 : 500); return (u8)(ntemp / 1000); } static int TEMP_FROM_REG(u8 reg) { return (s8)reg * 1000; } #define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div))) #define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255)) static inline unsigned long pwm_freq_from_reg_627hf(u8 reg) { unsigned long freq; freq = W83627HF_BASE_PWM_FREQ >> reg; return freq; } static inline u8 pwm_freq_to_reg_627hf(unsigned long val) { u8 i; /* * Only 5 dividers (1 2 4 8 16) * Search for the nearest available frequency */ for (i = 0; i < 4; i++) { if (val > (((W83627HF_BASE_PWM_FREQ >> i) + (W83627HF_BASE_PWM_FREQ >> (i+1))) / 2)) break; } return i; } static inline unsigned long pwm_freq_from_reg(u8 reg) { /* Clock bit 8 -> 180 kHz or 24 MHz */ unsigned long clock = (reg & 0x80) ? 180000UL : 24000000UL; reg &= 0x7f; /* This should not happen but anyway... */ if (reg == 0) reg++; return clock / (reg << 8); } static inline u8 pwm_freq_to_reg(unsigned long val) { /* Minimum divider value is 0x01 and maximum is 0x7F */ if (val >= 93750) /* The highest we can do */ return 0x01; if (val >= 720) /* Use 24 MHz clock */ return 24000000UL / (val << 8); if (val < 6) /* The lowest we can do */ return 0xFF; else /* Use 180 kHz clock */ return 0x80 | (180000UL / (val << 8)); } #define BEEP_MASK_FROM_REG(val) ((val) & 0xff7fff) #define BEEP_MASK_TO_REG(val) ((val) & 0xff7fff) #define DIV_FROM_REG(val) (1 << (val)) static inline u8 DIV_TO_REG(long val) { int i; val = SENSORS_LIMIT(val, 1, 128) >> 1; for (i = 0; i < 7; i++) { if (val == 0) break; val >>= 1; } return (u8)i; } /* * For each registered chip, we need to keep some data in memory. * The structure is dynamically allocated. 
*/
/*
 * Per-device state: I/O base, chip identification, and a cache of all
 * monitored register values (refreshed by w83627hf_update_device()).
 */
struct w83627hf_data {
	unsigned short addr;	/* ISA I/O base of the hardware monitor */
	const char *name;
	struct device *hwmon_dev;
	struct mutex lock;	/* serializes banked register access */
	enum chips type;

	struct mutex update_lock;	/* protects the cached values below */
	char valid;		/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */

	u8 in[9];		/* Register value */
	u8 in_max[9];		/* Register value */
	u8 in_min[9];		/* Register value */
	u8 fan[3];		/* Register value */
	u8 fan_min[3];		/* Register value */
	u16 temp[3];		/* Register value */
	u16 temp_max[3];	/* Register value */
	u16 temp_max_hyst[3];	/* Register value */
	u8 fan_div[3];		/* Register encoding, shifted right */
	u8 vid;			/* Register encoding, combined */
	u32 alarms;		/* Register encoding, combined */
	u32 beep_mask;		/* Register encoding, combined */
	u8 pwm[3];		/* Register value */
	u8 pwm_enable[3];	/* 1 = manual
				 * 2 = thermal cruise (also called SmartFan I)
				 * 3 = fan speed cruise */
	u8 pwm_freq[3];		/* Register value */
	u16 sens[3];		/* 1 = pentium diode; 2 = 3904 diode;
				 * 4 = thermistor */
	u8 vrm;
	u8 vrm_ovt;		/* Register value, 627THF/637HF/687THF only */
};

/* Forward declarations of driver entry points and register helpers */
static int w83627hf_probe(struct platform_device *pdev);
static int __devexit w83627hf_remove(struct platform_device *pdev);

static int w83627hf_read_value(struct w83627hf_data *data, u16 reg);
static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value);
static void w83627hf_update_fan_div(struct w83627hf_data *data);
static struct w83627hf_data *w83627hf_update_device(struct device *dev);
static void w83627hf_init_device(struct platform_device *pdev);

static struct platform_driver w83627hf_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= DRVNAME,
	},
	.probe		= w83627hf_probe,
	.remove		= __devexit_p(w83627hf_remove),
};

/* sysfs callbacks for voltage inputs in1..in8 (in0 is handled separately) */
static ssize_t show_in_input(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
}

static ssize_t show_in_min(struct device *dev,
			   struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
}

static ssize_t show_in_max(struct device *dev,
			   struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
}

/* Store a new low limit for inN and write it to the chip */
static ssize_t store_in_min(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_min[nr] = IN_TO_REG(val);
	w83627hf_write_value(data, W83781D_REG_IN_MIN(nr), data->in_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Store a new high limit for inN and write it to the chip */
static ssize_t store_in_max(struct device *dev,
			    struct device_attribute *devattr,
			    const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_max[nr] = IN_TO_REG(val);
	w83627hf_write_value(data, W83781D_REG_IN_MAX(nr), data->in_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare the input/min/max attribute triple for one voltage channel */
#define sysfs_vin_decl(offset) \
static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
			  show_in_input, NULL, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO|S_IWUSR, \
			  show_in_min, store_in_min, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO|S_IWUSR, \
			  show_in_max, store_in_max, offset);

sysfs_vin_decl(1);
sysfs_vin_decl(2);
sysfs_vin_decl(3);
sysfs_vin_decl(4);
sysfs_vin_decl(5);
sysfs_vin_decl(6);
sysfs_vin_decl(7);
sysfs_vin_decl(8);

/* use a different set of functions for in0 */
static ssize_t show_in_0(struct w83627hf_data *data, char
*buf, u8 reg)
{
	long in0;

	/*
	 * in0 is the CPU core voltage; on the THF/637/687 variants the
	 * VRM9 bit in vrm_ovt selects an alternative conversion formula.
	 */
	if ((data->vrm_ovt & 0x01) &&
	    (w83627thf == data->type || w83637hf == data->type ||
	     w83687thf == data->type))
		/* use VRM9 calculation */
		in0 = (long)((reg * 488 + 70000 + 50) / 100);
	else
		/* use VRM8 (standard) calculation */
		in0 = (long)IN_FROM_REG(reg);

	return sprintf(buf, "%ld\n", in0);
}

static ssize_t show_regs_in_0(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return show_in_0(data, buf, data->in[0]);
}

static ssize_t show_regs_in_min0(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return show_in_0(data, buf, data->in_min[0]);
}

static ssize_t show_regs_in_max0(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return show_in_0(data, buf, data->in_max[0]);
}

/* Store the in0 low limit, converting with the VRM9 formula when active */
static ssize_t store_regs_in_min0(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	if ((data->vrm_ovt & 0x01) &&
	    (w83627thf == data->type || w83637hf == data->type ||
	     w83687thf == data->type))
		/* use VRM9 calculation */
		data->in_min[0] =
			SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488,
				      0, 255);
	else
		/* use VRM8 (standard) calculation */
		data->in_min[0] = IN_TO_REG(val);

	w83627hf_write_value(data, W83781D_REG_IN_MIN(0), data->in_min[0]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Store the in0 high limit, converting with the VRM9 formula when active */
static ssize_t store_regs_in_max0(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	if ((data->vrm_ovt & 0x01) &&
	    (w83627thf == data->type || w83637hf == data->type ||
	     w83687thf == data->type))
		/* use VRM9 calculation */
		data->in_max[0] =
			SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488,
				      0, 255);
	else
		/* use VRM8 (standard) calculation */
		data->in_max[0] = IN_TO_REG(val);

	w83627hf_write_value(data, W83781D_REG_IN_MAX(0), data->in_max[0]);
	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(in0_input, S_IRUGO, show_regs_in_0, NULL);
static DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR,
		   show_regs_in_min0, store_regs_in_min0);
static DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR,
		   show_regs_in_max0, store_regs_in_max0);

/* sysfs callbacks for fan speed; RPM depends on the current divisor */
static ssize_t show_fan_input(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan[nr],
				(long)DIV_FROM_REG(data->fan_div[nr])));
}

static ssize_t show_fan_min(struct device *dev,
			    struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan_min[nr],
				(long)DIV_FROM_REG(data->fan_div[nr])));
}

/* Store a new fan low limit, encoded with the current divisor */
static ssize_t store_fan_min(struct device *dev,
			     struct device_attribute *devattr,
			     const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
	w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr),
			     data->fan_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare the input/min attribute pair for one fan (index is offset - 1) */
#define sysfs_fan_decl(offset)	\
static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO,			\
			  show_fan_input, NULL, offset - 1);		\
static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR,		\
			  show_fan_min, store_fan_min, offset - 1);

sysfs_fan_decl(1);
sysfs_fan_decl(2);
sysfs_fan_decl(3);

static ssize_t show_temp(struct device *dev, struct
device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	u16 tmp = data->temp[nr];

	/* temp1 uses the 8-bit encoding, temp2/temp3 the 9-bit LM75 one */
	return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
					  : (long) TEMP_FROM_REG(tmp));
}

static ssize_t show_temp_max(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	u16 tmp = data->temp_max[nr];
	return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
					  : (long) TEMP_FROM_REG(tmp));
}

static ssize_t show_temp_max_hyst(struct device *dev,
				  struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	u16 tmp = data->temp_max_hyst[nr];
	return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
					  : (long) TEMP_FROM_REG(tmp));
}

/* Store a new temperature high limit (encoding depends on the channel) */
static ssize_t store_temp_max(struct device *dev,
			      struct device_attribute *devattr,
			      const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	u16 tmp;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);

	mutex_lock(&data->update_lock);
	data->temp_max[nr] = tmp;
	w83627hf_write_value(data, w83627hf_reg_temp_over[nr], tmp);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Store a new temperature hysteresis limit */
static ssize_t store_temp_max_hyst(struct device *dev,
				   struct device_attribute *devattr,
				   const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	u16 tmp;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);

	mutex_lock(&data->update_lock);
	data->temp_max_hyst[nr] = tmp;
	w83627hf_write_value(data, w83627hf_reg_temp_hyst[nr], tmp);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare input/max/max_hyst attributes for one temperature channel */
#define sysfs_temp_decl(offset) \
static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
			  show_temp, NULL, offset - 1); \
static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO|S_IWUSR, \
			  show_temp_max, store_temp_max, offset - 1); \
static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO|S_IWUSR, \
			  show_temp_max_hyst, store_temp_max_hyst, offset - 1);

sysfs_temp_decl(1);
sysfs_temp_decl(2);
sysfs_temp_decl(3);

/* CPU core voltage as decoded from the VID pins */
static ssize_t show_vid_reg(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
}

static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);

static ssize_t show_vrm_reg(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%ld\n", (long) data->vrm);
}

/* vrm only affects the in-driver VID decoding; nothing is written to HW */
static ssize_t store_vrm_reg(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	data->vrm = val;

	return count;
}

static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);

/* Combined alarm bitmap (legacy interface) */
static ssize_t show_alarms_reg(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long) data->alarms);
}

static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);

/* One alarm bit, selected by the attribute's index */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	int bitnr = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}

static
SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16);
static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);

/* Combined beep mask (legacy interface) */
static ssize_t show_beep_mask(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n",
		      (long)BEEP_MASK_FROM_REG(data->beep_mask));
}

/*
 * Store the beep mask; bit 15 (global beep enable) is preserved and the
 * mask is spread over the three BEEP_INTS registers.
 */
static ssize_t store_beep_mask(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	/* preserve beep enable */
	data->beep_mask = (data->beep_mask & 0x8000)
			| BEEP_MASK_TO_REG(val);
	w83627hf_write_value(data, W83781D_REG_BEEP_INTS1,
			    data->beep_mask & 0xff);
	w83627hf_write_value(data, W83781D_REG_BEEP_INTS3,
			    ((data->beep_mask) >> 16) & 0xff);
	w83627hf_write_value(data, W83781D_REG_BEEP_INTS2,
			    (data->beep_mask >> 8) & 0xff);

	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
		   show_beep_mask, store_beep_mask);

/* One beep-enable bit, selected by the attribute's index */
static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	int bitnr = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%u\n", (data->beep_mask >> bitnr) & 1);
}

/*
 * Set or clear one beep bit, updating both the cached mask and the
 * matching BEEP_INTS register (which register depends on the bit number).
 */
static ssize_t store_beep(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	int bitnr = to_sensor_dev_attr(attr)->index;
	u8 reg;
	unsigned long bit;
	int err;

	err = kstrtoul(buf, 10, &bit);
	if (err)
		return err;

	if (bit & ~1)	/* only 0 and 1 are valid */
		return -EINVAL;

	mutex_lock(&data->update_lock);
	if (bit)
		data->beep_mask |= (1 << bitnr);
	else
		data->beep_mask &= ~(1 << bitnr);

	if (bitnr < 8) {
		reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS1);
		if (bit)
			reg |= (1 << bitnr);
		else
			reg &= ~(1 << bitnr);
		w83627hf_write_value(data, W83781D_REG_BEEP_INTS1, reg);
	} else if (bitnr < 16) {
		reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2);
		if (bit)
			reg |= (1 << (bitnr - 8));
		else
			reg &= ~(1 << (bitnr - 8));
		w83627hf_write_value(data, W83781D_REG_BEEP_INTS2, reg);
	} else {
		reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS3);
		if (bit)
			reg |= (1 << (bitnr - 16));
		else
			reg &= ~(1 << (bitnr - 16));
		w83627hf_write_value(data, W83781D_REG_BEEP_INTS3, reg);
	}
	mutex_unlock(&data->update_lock);

	return count;
}

static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 0);
static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 1);
static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 2);
static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 3);
static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 8);
static SENSOR_DEVICE_ATTR(in5_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 9);
static SENSOR_DEVICE_ATTR(in6_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 10);
static SENSOR_DEVICE_ATTR(in7_beep, S_IRUGO | S_IWUSR, show_beep,
store_beep, 16);
static SENSOR_DEVICE_ATTR(in8_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 17);
static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 6);
static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 7);
static SENSOR_DEVICE_ATTR(fan3_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 11);
static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 4);
static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 5);
static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 13);
static SENSOR_DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR,
			  show_beep, store_beep, 15);

static ssize_t show_fan_div(struct device *dev,
			    struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n",
		       (long) DIV_FROM_REG(data->fan_div[nr]));
}

/*
 * Note: we save and restore the fan minimum here, because its value is
 * determined in part by the fan divisor.  This follows the principle of
 * least surprise; the user doesn't expect the fan minimum to change just
 * because the divisor changed.
 */
static ssize_t store_fan_div(struct device *dev,
			     struct device_attribute *devattr,
			     const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long min;
	u8 reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	/* Save fan_min */
	min = FAN_FROM_REG(data->fan_min[nr],
			   DIV_FROM_REG(data->fan_div[nr]));

	data->fan_div[nr] = DIV_TO_REG(val);

	/* Low two divisor bits live in VID_FANDIV (or PIN for fan3) */
	reg = (w83627hf_read_value(data, nr==2 ? W83781D_REG_PIN
					       : W83781D_REG_VID_FANDIV)
	       & (nr==0 ? 0xcf : 0x3f))
	    | ((data->fan_div[nr] & 0x03) << (nr==0 ? 4 : 6));
	w83627hf_write_value(data, nr==2 ? W83781D_REG_PIN
					 : W83781D_REG_VID_FANDIV, reg);

	/* The third divisor bit lives in the VBAT register */
	reg = (w83627hf_read_value(data, W83781D_REG_VBAT)
	       & ~(1 << (5 + nr)))
	    | ((data->fan_div[nr] & 0x04) << (3 + nr));
	w83627hf_write_value(data, W83781D_REG_VBAT, reg);

	/* Restore fan_min */
	data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
	w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr),
			     data->fan_min[nr]);

	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO|S_IWUSR,
			  show_fan_div, store_fan_div, 0);
static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO|S_IWUSR,
			  show_fan_div, store_fan_div, 1);
static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO|S_IWUSR,
			  show_fan_div, store_fan_div, 2);

static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long) data->pwm[nr]);
}

/* Store a PWM duty cycle; the 627THF reserves the low nibble */
static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
			 const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	if (data->type == w83627thf) {
		/* bits 0-3 are reserved  in 627THF */
		data->pwm[nr] = PWM_TO_REG(val) & 0xf0;
		w83627hf_write_value(data,
				     W836X7HF_REG_PWM(data->type, nr),
				     data->pwm[nr] |
				     (w83627hf_read_value(data,
				     W836X7HF_REG_PWM(data->type, nr)) & 0x0f));
	} else {
		data->pwm[nr] = PWM_TO_REG(val);
		w83627hf_write_value(data,
				     W836X7HF_REG_PWM(data->type, nr),
				     data->pwm[nr]);
	}

	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 1);
static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2);

static ssize_t show_pwm_enable(struct device *dev,
			       struct device_attribute *devattr, char
*buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%d\n", data->pwm_enable[nr]);
}

/* Store the PWM mode: 1 = manual, 2 = thermal cruise, 3 = speed cruise */
static ssize_t store_pwm_enable(struct device *dev,
				struct device_attribute *devattr,
				const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	u8 reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	if (!val || val > 3)	/* modes 1, 2 and 3 are supported */
		return -EINVAL;

	mutex_lock(&data->update_lock);
	data->pwm_enable[nr] = val;
	reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]);
	/* Hardware encodes the mode as (val - 1) in a 2-bit field */
	reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]);
	reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr];
	w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
			  store_pwm_enable, 0);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
			  store_pwm_enable, 1);
static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
			  store_pwm_enable, 2);

/* PWM base frequency; the original 627HF uses a different encoding */
static ssize_t show_pwm_freq(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	if (data->type == w83627hf)
		return sprintf(buf, "%ld\n",
			       pwm_freq_from_reg_627hf(data->pwm_freq[nr]));
	else
		return sprintf(buf, "%ld\n",
			       pwm_freq_from_reg(data->pwm_freq[nr]));
}

/* Store a PWM frequency; on the 627HF both channels share one register */
static ssize_t store_pwm_freq(struct device *dev,
			      struct device_attribute *devattr,
			      const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	/* mask[nr] keeps the other channel's nibble in the shared register */
	static const u8 mask[]={0xF8, 0x8F};
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	if (data->type == w83627hf) {
		data->pwm_freq[nr] = pwm_freq_to_reg_627hf(val);
		w83627hf_write_value(data, W83627HF_REG_PWM_FREQ,
				     (data->pwm_freq[nr] << (nr*4)) |
				     (w83627hf_read_value(data,
				     W83627HF_REG_PWM_FREQ) & mask[nr]));
	} else {
		data->pwm_freq[nr] = pwm_freq_to_reg(val);
		w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr],
				     data->pwm_freq[nr]);
	}

	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO|S_IWUSR,
			  show_pwm_freq, store_pwm_freq, 0);
static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO|S_IWUSR,
			  show_pwm_freq, store_pwm_freq, 1);
static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO|S_IWUSR,
			  show_pwm_freq, store_pwm_freq, 2);

static ssize_t show_temp_type(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long) data->sens[nr]);
}

/*
 * Store the sensor type, configuring the SCFG1/SCFG2 diode-select bits:
 * 1 = PII/Celeron diode, 2 = 3904 transistor, 4 = thermistor.
 */
static ssize_t store_temp_type(struct device *dev,
			       struct device_attribute *devattr,
			       const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	u32 tmp;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	switch (val) {
	case 1:		/* PII/Celeron diode */
		tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
		w83627hf_write_value(data, W83781D_REG_SCFG1,
				    tmp | BIT_SCFG1[nr]);
		tmp = w83627hf_read_value(data, W83781D_REG_SCFG2);
		w83627hf_write_value(data, W83781D_REG_SCFG2,
				    tmp | BIT_SCFG2[nr]);
		data->sens[nr] = val;
		break;
	case 2:		/* 3904 */
		tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
		w83627hf_write_value(data, W83781D_REG_SCFG1,
				    tmp | BIT_SCFG1[nr]);
		tmp = w83627hf_read_value(data, W83781D_REG_SCFG2);
		w83627hf_write_value(data, W83781D_REG_SCFG2,
				    tmp & ~BIT_SCFG2[nr]);
		data->sens[nr] = val;
		break;
	case W83781D_DEFAULT_BETA:
		dev_warn(dev, "Sensor type %d is deprecated, please use 4 "
			 "instead\n", W83781D_DEFAULT_BETA);
		/* fall through */
	case 4:		/* thermistor */
		tmp = w83627hf_read_value(data,
W83781D_REG_SCFG1);
		w83627hf_write_value(data, W83781D_REG_SCFG1,
				    tmp & ~BIT_SCFG1[nr]);
		data->sens[nr] = val;
		break;
	default:
		dev_err(dev,
			"Invalid sensor type %ld; must be 1, 2, or 4\n",
			(long) val);
		break;
	}

	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare the tempN_type attribute (index is offset - 1) */
#define sysfs_temp_type(offset) \
static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \
			  show_temp_type, store_temp_type, offset - 1);

sysfs_temp_type(1);
sysfs_temp_type(2);
sysfs_temp_type(3);

static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", data->name);
}

static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

/*
 * Probe the Super-I/O at sioaddr for a supported Winbond chip, read the
 * hardware monitor base address into *addr and record the chip type in
 * sio_data.  Returns 0 on success, -ENODEV if no supported chip is found.
 */
static int __init w83627hf_find(int sioaddr, unsigned short *addr,
				struct w83627hf_sio_data *sio_data)
{
	int err = -ENODEV;
	u16 val;

	static const __initdata char *names[] = {
		"W83627HF",
		"W83627THF",
		"W83697HF",
		"W83637HF",
		"W83687THF",
	};

	sio_data->sioaddr = sioaddr;
	superio_enter(sio_data);
	val = force_id ? force_id : superio_inb(sio_data, DEVID);
	switch (val) {
	case W627_DEVID:
		sio_data->type = w83627hf;
		break;
	case W627THF_DEVID:
		sio_data->type = w83627thf;
		break;
	case W697_DEVID:
		sio_data->type = w83697hf;
		break;
	case W637_DEVID:
		sio_data->type = w83637hf;
		break;
	case W687THF_DEVID:
		sio_data->type = w83687thf;
		break;
	case 0xff:	/* No device at all */
		goto exit;
	default:
		pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val);
		goto exit;
	}

	superio_select(sio_data, W83627HF_LD_HWM);
	val = (superio_inb(sio_data, WINB_BASE_REG) << 8) |
	       superio_inb(sio_data, WINB_BASE_REG + 1);
	*addr = val & WINB_ALIGNMENT;
	if (*addr == 0) {
		pr_warn("Base address not set, skipping\n");
		goto exit;
	}

	val = superio_inb(sio_data, WINB_ACT_REG);
	if (!(val & 0x01)) {
		pr_warn("Enabling HWM logical device\n");
		superio_outb(sio_data, WINB_ACT_REG, val | 0x01);
	}

	err = 0;
	pr_info(DRVNAME ": Found %s chip at %#x\n",
		names[sio_data->type], *addr);

 exit:
	superio_exit(sio_data);
	return err;
}

/* Attribute-list helpers for one voltage, fan or temperature channel */
#define VIN_UNIT_ATTRS(_X_)	\
	&sensor_dev_attr_in##_X_##_input.dev_attr.attr,		\
	&sensor_dev_attr_in##_X_##_min.dev_attr.attr,		\
	&sensor_dev_attr_in##_X_##_max.dev_attr.attr,		\
	&sensor_dev_attr_in##_X_##_alarm.dev_attr.attr,		\
	&sensor_dev_attr_in##_X_##_beep.dev_attr.attr

#define FAN_UNIT_ATTRS(_X_)	\
	&sensor_dev_attr_fan##_X_##_input.dev_attr.attr,	\
	&sensor_dev_attr_fan##_X_##_min.dev_attr.attr,		\
	&sensor_dev_attr_fan##_X_##_div.dev_attr.attr,		\
	&sensor_dev_attr_fan##_X_##_alarm.dev_attr.attr,	\
	&sensor_dev_attr_fan##_X_##_beep.dev_attr.attr

#define TEMP_UNIT_ATTRS(_X_)	\
	&sensor_dev_attr_temp##_X_##_input.dev_attr.attr,	\
	&sensor_dev_attr_temp##_X_##_max.dev_attr.attr,		\
	&sensor_dev_attr_temp##_X_##_max_hyst.dev_attr.attr,	\
	&sensor_dev_attr_temp##_X_##_type.dev_attr.attr,	\
	&sensor_dev_attr_temp##_X_##_alarm.dev_attr.attr,	\
	&sensor_dev_attr_temp##_X_##_beep.dev_attr.attr

/* Attributes common to every supported chip */
static struct attribute *w83627hf_attributes[] = {
	&dev_attr_in0_input.attr,
	&dev_attr_in0_min.attr,
	&dev_attr_in0_max.attr,
	&sensor_dev_attr_in0_alarm.dev_attr.attr,
	&sensor_dev_attr_in0_beep.dev_attr.attr,
	VIN_UNIT_ATTRS(2),
	VIN_UNIT_ATTRS(3),
	VIN_UNIT_ATTRS(4),
	VIN_UNIT_ATTRS(7),
	VIN_UNIT_ATTRS(8),
	FAN_UNIT_ATTRS(1),
	FAN_UNIT_ATTRS(2),
	TEMP_UNIT_ATTRS(1),
	TEMP_UNIT_ATTRS(2),
	&dev_attr_alarms.attr,
	&sensor_dev_attr_beep_enable.dev_attr.attr,
	&dev_attr_beep_mask.attr,

	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm2.dev_attr.attr,
	&dev_attr_name.attr,
	NULL
};

static const struct attribute_group w83627hf_group = {
	.attrs = w83627hf_attributes,
};

/* Attributes created individually in probe, depending on the chip type */
static struct attribute *w83627hf_attributes_opt[] = {
	VIN_UNIT_ATTRS(1),
	VIN_UNIT_ATTRS(5),
	VIN_UNIT_ATTRS(6),
	FAN_UNIT_ATTRS(3),
	TEMP_UNIT_ATTRS(3),
	&sensor_dev_attr_pwm3.dev_attr.attr,
	&sensor_dev_attr_pwm1_freq.dev_attr.attr,
	&sensor_dev_attr_pwm2_freq.dev_attr.attr,
	&sensor_dev_attr_pwm3_freq.dev_attr.attr,

	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm2_enable.dev_attr.attr,
	&sensor_dev_attr_pwm3_enable.dev_attr.attr,
	NULL
};

static const struct attribute_group w83627hf_group_opt = {
	.attrs = w83627hf_attributes_opt,
};

/*
 * Platform probe: claim the I/O region, initialize the chip, create the
 * common and chip-specific sysfs attributes, and register with hwmon.
 */
static int __devinit w83627hf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct w83627hf_sio_data *sio_data = dev->platform_data;
	struct w83627hf_data *data;
	struct resource *res;
	int err, i;

	static const char *names[] = {
		"w83627hf",
		"w83627thf",
		"w83697hf",
		"w83637hf",
		"w83687thf",
	};

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!request_region(res->start, WINB_REGION_SIZE, DRVNAME)) {
		dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
			(unsigned long)res->start,
			(unsigned long)(res->start + WINB_REGION_SIZE - 1));
		err = -EBUSY;
		goto ERROR0;
	}

	data = kzalloc(sizeof(struct w83627hf_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto ERROR1;
	}
	data->addr = res->start;
	data->type = sio_data->type;
	data->name = names[sio_data->type];
	mutex_init(&data->lock);
	mutex_init(&data->update_lock);
	platform_set_drvdata(pdev, data);

	/* Initialize the chip */
	w83627hf_init_device(pdev);

	/* A few vars need to be filled upon startup */
	for (i = 0; i <= 2; i++)
		data->fan_min[i] = w83627hf_read_value(
					data, W83627HF_REG_FAN_MIN(i));
	w83627hf_update_fan_div(data);

	/* Register common device attributes */
	err = sysfs_create_group(&dev->kobj, &w83627hf_group);
	if (err)
		goto ERROR3;

	/* Register chip-specific device attributes */
	if (data->type == w83627hf || data->type == w83697hf)
		if ((err = device_create_file(dev,
				&sensor_dev_attr_in5_input.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in5_min.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in5_max.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in5_alarm.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in5_beep.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in6_input.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in6_min.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in6_max.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in6_alarm.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in6_beep.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_pwm1_freq.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_pwm2_freq.dev_attr)))
			goto ERROR4;

	if (data->type != w83697hf)
		if ((err = device_create_file(dev,
				&sensor_dev_attr_in1_input.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in1_min.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in1_max.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in1_alarm.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_in1_beep.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_fan3_input.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_fan3_min.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_fan3_div.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_fan3_alarm.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_fan3_beep.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_temp3_input.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_temp3_max.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_temp3_max_hyst.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_temp3_alarm.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_temp3_beep.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_temp3_type.dev_attr)))
			goto ERROR4;

	if (data->type != w83697hf && data->vid != 0xff) {
		/* Convert VID to voltage based on VRM */
		data->vrm = vid_which_vrm();

		if ((err = device_create_file(dev, &dev_attr_cpu0_vid))
		 || (err = device_create_file(dev, &dev_attr_vrm)))
			goto ERROR4;
	}

	if (data->type == w83627thf || data->type == w83637hf
	    || data->type == w83687thf) {
		err = device_create_file(dev, &sensor_dev_attr_pwm3.dev_attr);
		if (err)
			goto ERROR4;
	}

	if (data->type == w83637hf || data->type == w83687thf)
		if ((err = device_create_file(dev,
				&sensor_dev_attr_pwm1_freq.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_pwm2_freq.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_pwm3_freq.dev_attr)))
			goto ERROR4;

	if (data->type != w83627hf)
		if ((err = device_create_file(dev,
				&sensor_dev_attr_pwm1_enable.dev_attr))
		 || (err = device_create_file(dev,
				&sensor_dev_attr_pwm2_enable.dev_attr)))
			goto ERROR4;

	if (data->type == w83627thf || data->type == w83637hf
	    || data->type == w83687thf) {
		err = device_create_file(dev,
					 &sensor_dev_attr_pwm3_enable.dev_attr);
		if (err)
			goto ERROR4;
	}

	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto ERROR4;
	}

	return 0;

	/* Unwind in reverse order of acquisition */
      ERROR4:
	sysfs_remove_group(&dev->kobj, &w83627hf_group);
	sysfs_remove_group(&dev->kobj, &w83627hf_group_opt);
      ERROR3:
	platform_set_drvdata(pdev, NULL);
	kfree(data);
      ERROR1:
	release_region(res->start, WINB_REGION_SIZE);
      ERROR0:
	return err;
}

static int __devexit w83627hf_remove(struct platform_device
*pdev) { struct w83627hf_data *data = platform_get_drvdata(pdev); struct resource *res; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group); sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group_opt); platform_set_drvdata(pdev, NULL); kfree(data); res = platform_get_resource(pdev, IORESOURCE_IO, 0); release_region(res->start, WINB_REGION_SIZE); return 0; } /* Registers 0x50-0x5f are banked */ static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg) { if ((reg & 0x00f0) == 0x50) { outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET); } } /* Not strictly necessary, but play it safe for now */ static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg) { if (reg & 0xff00) { outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); outb_p(0, data->addr + W83781D_DATA_REG_OFFSET); } } static int w83627hf_read_value(struct w83627hf_data *data, u16 reg) { int res, word_sized; mutex_lock(&data->lock); word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) && (((reg & 0x00ff) == 0x50) || ((reg & 0x00ff) == 0x53) || ((reg & 0x00ff) == 0x55)); w83627hf_set_bank(data, reg); outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET); res = inb_p(data->addr + W83781D_DATA_REG_OFFSET); if (word_sized) { outb_p((reg & 0xff) + 1, data->addr + W83781D_ADDR_REG_OFFSET); res = (res << 8) + inb_p(data->addr + W83781D_DATA_REG_OFFSET); } w83627hf_reset_bank(data, reg); mutex_unlock(&data->lock); return res; } static int __devinit w83627thf_read_gpio5(struct platform_device *pdev) { struct w83627hf_sio_data *sio_data = pdev->dev.platform_data; int res = 0xff, sel; superio_enter(sio_data); superio_select(sio_data, W83627HF_LD_GPIO5); /* Make sure these GPIO pins are enabled */ if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) { dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n"); goto exit; } /* * Make sure the pins 
 are configured for input
	 * There must be at least five (VRM 9), and possibly 6 (VRM 10)
	 */
	sel = superio_inb(sio_data, W83627THF_GPIO5_IOSR) & 0x3f;
	if ((sel & 0x1f) != 0x1f) {
		dev_dbg(&pdev->dev, "GPIO5 not configured for VID "
			"function\n");
		goto exit;
	}

	dev_info(&pdev->dev, "Reading VID from GPIO5\n");
	res = superio_inb(sio_data, W83627THF_GPIO5_DR) & sel;

 exit:
	superio_exit(sio_data);
	return res;
}

/*
 * Read the CPU VID from the dedicated VID pins (687THF).  Returns 0xff
 * when the VID function is disabled or the pins are configured as output.
 */
static int __devinit w83687thf_read_vid(struct platform_device *pdev)
{
	struct w83627hf_sio_data *sio_data = pdev->dev.platform_data;
	int res = 0xff;

	superio_enter(sio_data);
	superio_select(sio_data, W83627HF_LD_HWM);

	/* Make sure these GPIO pins are enabled */
	if (!(superio_inb(sio_data, W83687THF_VID_EN) & (1 << 2))) {
		dev_dbg(&pdev->dev, "VID disabled, no VID function\n");
		goto exit;
	}

	/* Make sure the pins are configured for input */
	if (!(superio_inb(sio_data, W83687THF_VID_CFG) & (1 << 4))) {
		dev_dbg(&pdev->dev, "VID configured as output, "
			"no VID function\n");
		goto exit;
	}

	res = superio_inb(sio_data, W83687THF_VID_DATA) & 0x3f;

 exit:
	superio_exit(sio_data);
	return res;
}

/*
 * Write one register; the word-sized LM75-style limit registers (0x53/0x55
 * in banks 1/2) take the high byte first.  Serialized by data->lock.
 */
static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
{
	int word_sized;

	mutex_lock(&data->lock);
	word_sized = (((reg & 0xff00) == 0x100)
		   || ((reg & 0xff00) == 0x200))
		  && (((reg & 0x00ff) == 0x53)
		   || ((reg & 0x00ff) == 0x55));
	w83627hf_set_bank(data, reg);
	outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
	if (word_sized) {
		outb_p(value >> 8,
		       data->addr + W83781D_DATA_REG_OFFSET);
		outb_p((reg & 0xff) + 1,
		       data->addr + W83781D_ADDR_REG_OFFSET);
	}
	outb_p(value & 0xff,
	       data->addr + W83781D_DATA_REG_OFFSET);
	w83627hf_reset_bank(data, reg);
	mutex_unlock(&data->lock);
	return 0;
}

/*
 * One-time chip setup at probe: park the unused i2c addresses, latch the
 * VID and VRM/OVT configuration, detect sensor types, optionally enable
 * temp2/temp3 and start monitoring (continues in next chunk).
 */
static void __devinit w83627hf_init_device(struct platform_device *pdev)
{
	struct w83627hf_data *data = platform_get_drvdata(pdev);
	int i;
	enum chips type = data->type;
	u8 tmp;

	/* Minimize conflicts with other winbond i2c-only clients... */
	/* disable i2c subclients...
 how to disable main i2c client?? */
	/* force i2c address to relatively uncommon address */
	w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
	w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);

	/* Read VID only once; the source of the VID bits is model-specific */
	if (type == w83627hf || type == w83637hf) {
		int lo = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
		int hi = w83627hf_read_value(data, W83781D_REG_CHIPID);
		data->vid = (lo & 0x0f) | ((hi & 0x01) << 4);
	} else if (type == w83627thf) {
		data->vid = w83627thf_read_gpio5(pdev);
	} else if (type == w83687thf) {
		data->vid = w83687thf_read_vid(pdev);
	}

	/* Read VRM & OVT Config only once */
	if (type == w83627thf || type == w83637hf || type == w83687thf) {
		data->vrm_ovt =
			w83627hf_read_value(data, W83627THF_REG_VRM_OVT_CFG);
	}

	/* Detect each temp channel's sensor type from SCFG1/SCFG2 */
	tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
	for (i = 1; i <= 3; i++) {
		if (!(tmp & BIT_SCFG1[i - 1])) {
			data->sens[i - 1] = 4;	/* thermistor */
		} else {
			if (w83627hf_read_value
			    (data, W83781D_REG_SCFG2) & BIT_SCFG2[i - 1])
				data->sens[i - 1] = 1;	/* pentium diode */
			else
				data->sens[i - 1] = 2;	/* 3904 diode */
		}
		/* 697HF has only two temp channels */
		if ((type == w83697hf) && (i == 2))
			break;
	}

	if (init) {
		/* Enable temp2 */
		tmp = w83627hf_read_value(data, W83627HF_REG_TEMP2_CONFIG);
		if (tmp & 0x01) {
			dev_warn(&pdev->dev, "Enabling temp2, readings "
				 "might not make sense\n");
			w83627hf_write_value(data, W83627HF_REG_TEMP2_CONFIG,
				tmp & 0xfe);
		}

		/* Enable temp3 */
		if (type != w83697hf) {
			tmp = w83627hf_read_value(data,
				W83627HF_REG_TEMP3_CONFIG);
			if (tmp & 0x01) {
				dev_warn(&pdev->dev, "Enabling temp3, "
					 "readings might not make sense\n");
				w83627hf_write_value(data,
					W83627HF_REG_TEMP3_CONFIG, tmp & 0xfe);
			}
		}
	}

	/* Start monitoring */
	w83627hf_write_value(data, W83781D_REG_CONFIG,
			    (w83627hf_read_value(data,
						 W83781D_REG_CONFIG) & 0xf7)
			    | 0x01);

	/* Enable VBAT monitoring if needed */
	tmp = w83627hf_read_value(data, W83781D_REG_VBAT);
	if (!(tmp & 0x01))
		w83627hf_write_value(data, W83781D_REG_VBAT, tmp | 0x01);
}

/*
 * Refresh the cached 3-bit fan divisors: two low bits come from
 * VID_FANDIV/PIN, the third bit from VBAT (continues in next chunk).
 */
static void w83627hf_update_fan_div(struct w83627hf_data *data)
{
	int reg;

	reg =
 w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
	data->fan_div[0] = (reg >> 4) & 0x03;
	data->fan_div[1] = (reg >> 6) & 0x03;
	if (data->type != w83697hf) {
		data->fan_div[2] = (w83627hf_read_value(data,
				       W83781D_REG_PIN) >> 6) & 0x03;
	}
	/* The high bit of each divisor lives in the VBAT register */
	reg = w83627hf_read_value(data, W83781D_REG_VBAT);
	data->fan_div[0] |= (reg >> 3) & 0x04;
	data->fan_div[1] |= (reg >> 4) & 0x04;
	if (data->type != w83697hf)
		data->fan_div[2] |= (reg >> 5) & 0x04;
}

/*
 * Re-read every sensor register into the in-memory cache, at most once
 * every 1.5 s.  All sysfs show() handlers go through this.  Returns the
 * (possibly refreshed) per-chip data under update_lock.
 */
static struct w83627hf_data *w83627hf_update_device(struct device *dev)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	int i, num_temps = (data->type == w83697hf) ? 2 : 3;
	int num_pwms = (data->type == w83697hf) ? 2 : 3;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
	    || !data->valid) {
		/* Voltage inputs and their limits */
		for (i = 0; i <= 8; i++) {
			/* skip missing sensors */
			if (((data->type == w83697hf) && (i == 1)) ||
			    ((data->type != w83627hf
			      && data->type != w83697hf)
			     && (i == 5 || i == 6)))
				continue;
			data->in[i] =
			    w83627hf_read_value(data, W83781D_REG_IN(i));
			data->in_min[i] =
			    w83627hf_read_value(data,
						W83781D_REG_IN_MIN(i));
			data->in_max[i] =
			    w83627hf_read_value(data,
						W83781D_REG_IN_MAX(i));
		}
		/* Fan tach counts and minimums */
		for (i = 0; i <= 2; i++) {
			data->fan[i] =
			    w83627hf_read_value(data, W83627HF_REG_FAN(i));
			data->fan_min[i] =
			    w83627hf_read_value(data,
						W83627HF_REG_FAN_MIN(i));
		}
		/* PWM duty cycles (register layout is model-specific) */
		for (i = 0; i <= 2; i++) {
			u8 tmp = w83627hf_read_value(data,
				W836X7HF_REG_PWM(data->type, i));
			/* bits 0-3 are reserved  in 627THF */
			if (data->type == w83627thf)
				tmp &= 0xf0;
			data->pwm[i] = tmp;
			if (i == 1 &&
			    (data->type == w83627hf
			     || data->type == w83697hf))
				break;
		}
		/* PWM base frequencies */
		if (data->type == w83627hf) {
			u8 tmp = w83627hf_read_value(data,
						W83627HF_REG_PWM_FREQ);
			data->pwm_freq[0] = tmp & 0x07;
			data->pwm_freq[1] = (tmp >> 4) & 0x07;
		} else if (data->type != w83627thf) {
			for (i = 1; i <= 3; i++) {
				data->pwm_freq[i - 1] =
					w83627hf_read_value(data,
						W83637HF_REG_PWM_FREQ[i - 1]);
				if (i == 2 && (data->type == w83697hf))
					break;
			}
		}
		/* PWM enable modes (not on the plain 627HF) */
		if (data->type != w83627hf) {
			for (i = 0; i <
 num_pwms; i++) {
				u8 tmp = w83627hf_read_value(data,
					W83627THF_REG_PWM_ENABLE[i]);
				data->pwm_enable[i] =
					((tmp >> W83627THF_PWM_ENABLE_SHIFT[i])
						& 0x03) + 1;
			}
		}
		/* Temperatures and their limits */
		for (i = 0; i < num_temps; i++) {
			data->temp[i] = w83627hf_read_value(
						data, w83627hf_reg_temp[i]);
			data->temp_max[i] = w83627hf_read_value(
						data, w83627hf_reg_temp_over[i]);
			data->temp_max_hyst[i] = w83627hf_read_value(
						data, w83627hf_reg_temp_hyst[i]);
		}
		w83627hf_update_fan_div(data);
		/* Combine the three alarm / beep registers into one word */
		data->alarms =
		    w83627hf_read_value(data, W83781D_REG_ALARM1) |
		    (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) |
		    (w83627hf_read_value(data, W83781D_REG_ALARM3) << 16);
		i = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2);
		data->beep_mask = (i << 8) |
		    w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) |
		    w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16;
		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

/*
 * Instantiate the platform device for the chip found at `address`.
 * Checks for ACPI resource conflicts first; sets the file-global `pdev`
 * as a side effect (see sensors_w83627hf_init()).
 */
static int __init w83627hf_device_add(unsigned short address,
				      const struct w83627hf_sio_data *sio_data)
{
	struct resource res = {
		.start	= address + WINB_REGION_OFFSET,
		.end	= address + WINB_REGION_OFFSET + WINB_REGION_SIZE - 1,
		.name	= DRVNAME,
		.flags	= IORESOURCE_IO,
	};
	int err;

	err = acpi_check_resource_conflict(&res);
	if (err)
		goto exit;

	pdev = platform_device_alloc(DRVNAME, address);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed\n");
		goto exit;
	}

	err = platform_device_add_resources(pdev, &res, 1);
	if (err) {
		pr_err("Device resource addition failed (%d)\n", err);
		goto exit_device_put;
	}

	err = platform_device_add_data(pdev, sio_data,
				       sizeof(struct w83627hf_sio_data));
	if (err) {
		pr_err("Platform data allocation failed\n");
		goto exit_device_put;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto exit_device_put;
	}

	return 0;

exit_device_put:
	platform_device_put(pdev);
exit:
	return err;
}

/*
 * Module entry point: probe the two standard Super-I/O config addresses
 * (0x2e, 0x4e), then register driver + device (continues in next chunk).
 */
static int __init sensors_w83627hf_init(void)
{
	int err;
	unsigned short address;
	struct w83627hf_sio_data sio_data;

	if
 (w83627hf_find(0x2e, &address, &sio_data)
	 && w83627hf_find(0x4e, &address, &sio_data))
		return -ENODEV;

	err = platform_driver_register(&w83627hf_driver);
	if (err)
		goto exit;

	/* Sets global pdev as a side effect */
	err = w83627hf_device_add(address, &sio_data);
	if (err)
		goto exit_driver;

	return 0;

exit_driver:
	platform_driver_unregister(&w83627hf_driver);
exit:
	return err;
}

/* Module exit: remove the device created at init time, then the driver */
static void __exit sensors_w83627hf_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&w83627hf_driver);
}

MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
	      "Philip Edelbrock <phil@netroedge.com>, "
	      "and Mark Studebaker <mdsxyz123@yahoo.com>");
MODULE_DESCRIPTION("W83627HF driver");
MODULE_LICENSE("GPL");

module_init(sensors_w83627hf_init);
module_exit(sensors_w83627hf_exit);
gpl-2.0
DirtyUnicorns/android_kernel_motorola_msm8960dt-common
drivers/hwmon/w83627hf.c
4847
56094
/* * w83627hf.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring * Copyright (c) 1998 - 2003 Frodo Looijaard <frodol@dds.nl>, * Philip Edelbrock <phil@netroedge.com>, * and Mark Studebaker <mdsxyz123@yahoo.com> * Ported to 2.6 by Bernhard C. Schrenk <clemy@clemy.org> * Copyright (c) 2007 Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Supports following chips: * * Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA * w83627hf 9 3 2 3 0x20 0x5ca3 no yes(LPC) * w83627thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) * w83637hf 7 3 3 3 0x80 0x5ca3 no yes(LPC) * w83687thf 7 3 3 3 0x90 0x5ca3 no yes(LPC) * w83697hf 8 2 2 2 0x60 0x5ca3 no yes(LPC) * * For other winbond chips, and for i2c support in the above chips, * use w83781d.c. * * Note: automatic ("cruise") fan control for 697, 637 & 627thf not * supported yet. 
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include "lm75.h"

/* The one platform device this driver ever creates (see device_add) */
static struct platform_device *pdev;

#define DRVNAME "w83627hf"
enum chips { w83627hf, w83627thf, w83697hf, w83637hf, w83687thf };

/* Passed from the Super-I/O detection code to probe() as platform data */
struct w83627hf_sio_data {
	enum chips type;	/* which chip variant was detected */
	int sioaddr;		/* Super-I/O config port base (0x2e or 0x4e) */
};

static u8 force_i2c = 0x1f;
module_param(force_i2c, byte, 0);
MODULE_PARM_DESC(force_i2c,
		 "Initialize the i2c address of the sensors");

static bool init = 1;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization");

static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");

/* modified from kernel/include/traps.c */
#define	DEV	0x07	/* Register: Logical device select */

/* logical device numbers for superio_select (below) */
#define W83627HF_LD_FDC		0x00
#define W83627HF_LD_PRT		0x01
#define W83627HF_LD_UART1	0x02
#define W83627HF_LD_UART2	0x03
#define W83627HF_LD_KBC		0x05
#define W83627HF_LD_CIR		0x06 /* w83627hf only */
#define W83627HF_LD_GAME	0x07
#define W83627HF_LD_MIDI	0x07
#define W83627HF_LD_GPIO1	0x07
#define W83627HF_LD_GPIO5	0x07 /* w83627thf only */
#define W83627HF_LD_GPIO2	0x08
#define W83627HF_LD_GPIO3	0x09
#define W83627HF_LD_GPIO4	0x09 /* w83627thf only */
#define W83627HF_LD_ACPI	0x0a
#define W83627HF_LD_HWM		0x0b

#define	DEVID	0x20	/* Register: Device ID */

#define W83627THF_GPIO5_EN	0x30 /* w83627thf only */
#define W83627THF_GPIO5_IOSR	0xf3 /* w83627thf only */
#define W83627THF_GPIO5_DR	0xf4 /* w83627thf only */

#define W83687THF_VID_EN	0x29 /* w83687thf only */
#define W83687THF_VID_CFG	0xF0 /* w83687thf only */
#define W83687THF_VID_DATA	0xF1 /*
 w83687thf only */

/*
 * Super-I/O configuration space accessors: an index/data port pair at
 * sio->sioaddr / sioaddr+1, guarded by the enter/exit magic sequence.
 */
static inline void superio_outb(struct w83627hf_sio_data *sio, int reg,
				int val)
{
	outb(reg, sio->sioaddr);
	outb(val, sio->sioaddr + 1);
}

static inline int superio_inb(struct w83627hf_sio_data *sio, int reg)
{
	outb(reg, sio->sioaddr);
	return inb(sio->sioaddr + 1);
}

/* Select which logical device subsequent config accesses address */
static inline void superio_select(struct w83627hf_sio_data *sio, int ld)
{
	outb(DEV, sio->sioaddr);
	outb(ld, sio->sioaddr + 1);
}

/* Unlock config mode: the magic byte 0x87 written twice */
static inline void superio_enter(struct w83627hf_sio_data *sio)
{
	outb(0x87, sio->sioaddr);
	outb(0x87, sio->sioaddr);
}

/* Lock config mode again */
static inline void superio_exit(struct w83627hf_sio_data *sio)
{
	outb(0xAA, sio->sioaddr);
}

#define W627_DEVID 0x52
#define W627THF_DEVID 0x82
#define W697_DEVID 0x60
#define W637_DEVID 0x70
#define W687THF_DEVID 0x85
#define WINB_ACT_REG 0x30
#define WINB_BASE_REG 0x60
/* Constants specified below */

/* Alignment of the base address */
#define WINB_ALIGNMENT		~7

/* Offset & size of I/O region we are interested in */
#define WINB_REGION_OFFSET	5
#define WINB_REGION_SIZE	2

/* Where are the sensors address/data registers relative to the region offset */
#define W83781D_ADDR_REG_OFFSET 0
#define W83781D_DATA_REG_OFFSET 1

/* The W83781D registers */
/* The W83782D registers for nr=7,8 are in bank 5 */
#define W83781D_REG_IN_MAX(nr) ((nr < 7) ? (0x2b + (nr) * 2) : \
					   (0x554 + (((nr) - 7) * 2)))
#define W83781D_REG_IN_MIN(nr) ((nr < 7) ? (0x2c + (nr) * 2) : \
					   (0x555 + (((nr) - 7) * 2)))
#define W83781D_REG_IN(nr)     ((nr < 7) ?
 (0x20 + (nr)) : \
					   (0x550 + (nr) - 7))

/* nr:0-2 for fans:1-3 */
#define W83627HF_REG_FAN_MIN(nr)	(0x3b + (nr))
#define W83627HF_REG_FAN(nr)		(0x28 + (nr))

#define W83627HF_REG_TEMP2_CONFIG 0x152
#define W83627HF_REG_TEMP3_CONFIG 0x252
/* these are zero-based, unlike config constants above */
static const u16 w83627hf_reg_temp[]		= { 0x27, 0x150, 0x250 };
static const u16 w83627hf_reg_temp_hyst[]	= { 0x3A, 0x153, 0x253 };
static const u16 w83627hf_reg_temp_over[]	= { 0x39, 0x155, 0x255 };

#define W83781D_REG_BANK 0x4E

#define W83781D_REG_CONFIG 0x40
#define W83781D_REG_ALARM1 0x459
#define W83781D_REG_ALARM2 0x45A
#define W83781D_REG_ALARM3 0x45B

#define W83781D_REG_BEEP_CONFIG 0x4D
#define W83781D_REG_BEEP_INTS1 0x56
#define W83781D_REG_BEEP_INTS2 0x57
#define W83781D_REG_BEEP_INTS3 0x453

#define W83781D_REG_VID_FANDIV 0x47

#define W83781D_REG_CHIPID 0x49
#define W83781D_REG_WCHIPID 0x58
#define W83781D_REG_CHIPMAN 0x4F
#define W83781D_REG_PIN 0x4B

#define W83781D_REG_VBAT 0x5D

#define W83627HF_REG_PWM1 0x5A
#define W83627HF_REG_PWM2 0x5B

/* PWM mode registers and the bit position of each fan's 2-bit mode field */
static const u8 W83627THF_REG_PWM_ENABLE[] = {
	0x04,		/* FAN 1 mode */
	0x04,		/* FAN 2 mode */
	0x12,		/* FAN AUX mode */
};
static const u8 W83627THF_PWM_ENABLE_SHIFT[] = { 2, 4, 1 };

#define W83627THF_REG_PWM1		0x01	/* 697HF/637HF/687THF too */
#define W83627THF_REG_PWM2		0x03	/* 697HF/637HF/687THF too */
#define W83627THF_REG_PWM3		0x11	/* 637HF/687THF too */

#define W83627THF_REG_VRM_OVT_CFG 	0x18	/* 637HF/687THF too */

static const u8 regpwm_627hf[] = { W83627HF_REG_PWM1, W83627HF_REG_PWM2 };
static const u8 regpwm[] = { W83627THF_REG_PWM1, W83627THF_REG_PWM2,
			     W83627THF_REG_PWM3 };

/* Pick the right PWM register table for the chip variant */
#define W836X7HF_REG_PWM(type, nr) (((type) == w83627hf) ?
 \
				    regpwm_627hf[nr] : regpwm[nr])

#define W83627HF_REG_PWM_FREQ		0x5C	/* Only for the 627HF */

#define W83637HF_REG_PWM_FREQ1		0x00	/* 697HF/687THF too */
#define W83637HF_REG_PWM_FREQ2		0x02	/* 697HF/687THF too */
#define W83637HF_REG_PWM_FREQ3		0x10	/* 687THF too */

static const u8 W83637HF_REG_PWM_FREQ[] = { W83637HF_REG_PWM_FREQ1,
					W83637HF_REG_PWM_FREQ2,
					W83637HF_REG_PWM_FREQ3 };

#define W83627HF_BASE_PWM_FREQ	46870

#define W83781D_REG_I2C_ADDR 0x48
#define W83781D_REG_I2C_SUBADDR 0x4A

/* Sensor selection */
#define W83781D_REG_SCFG1 0x5D
static const u8 BIT_SCFG1[] = { 0x02, 0x04, 0x08 };
#define W83781D_REG_SCFG2 0x59
static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
#define W83781D_DEFAULT_BETA 3435

/*
 * Conversions. Limit checking is only done on the TO_REG
 * variants. Note that you should be a bit careful with which arguments
 * these macros are called: arguments may be evaluated more than once.
 * Fixing this is just not worth it.
 */
#define IN_TO_REG(val)  (SENSORS_LIMIT((((val) + 8)/16),0,255))
#define IN_FROM_REG(val) ((val) * 16)

/* RPM -> 8-bit tach count; 0 RPM maps to 255 ("fan stopped" sentinel) */
static inline u8 FAN_TO_REG(long rpm, int div)
{
	if (rpm == 0)
		return 255;
	rpm = SENSORS_LIMIT(rpm, 1, 1000000);
	return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
			     254);
}

#define TEMP_MIN (-128000)
#define TEMP_MAX ( 127000)

/*
 * TEMP: 0.001C/bit (-128C to +127C)
 * REG: 1C/bit, two's complement
 */
static u8 TEMP_TO_REG(long temp)
{
	/* Round to the nearest degree after clamping to the valid range */
	int ntemp = SENSORS_LIMIT(temp, TEMP_MIN, TEMP_MAX);
	ntemp += (ntemp<0 ?
 -500 : 500);
	return (u8)(ntemp / 1000);
}

/* Register value (1C/bit, two's complement) -> millidegrees C */
static int TEMP_FROM_REG(u8 reg)
{
	return (s8)reg * 1000;
}

/* 0 = tach overflow (-1), 255 = fan stopped (0), else 1350000/(count*div) */
#define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div)))

#define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255))

/* 627HF encodes the PWM base frequency as a power-of-two divider */
static inline unsigned long pwm_freq_from_reg_627hf(u8 reg)
{
	unsigned long freq;
	freq = W83627HF_BASE_PWM_FREQ >> reg;
	return freq;
}
static inline u8 pwm_freq_to_reg_627hf(unsigned long val)
{
	u8 i;
	/*
	 * Only 5 dividers (1 2 4 8 16)
	 * Search for the nearest available frequency
	 */
	for (i = 0; i < 4; i++) {
		if (val > (((W83627HF_BASE_PWM_FREQ >> i) +
			    (W83627HF_BASE_PWM_FREQ >> (i+1))) / 2))
			break;
	}
	return i;
}

/* Other chips: 7-bit divider plus a clock-select bit (180 kHz / 24 MHz) */
static inline unsigned long pwm_freq_from_reg(u8 reg)
{
	/* Clock bit 8 -> 180 kHz or 24 MHz */
	unsigned long clock = (reg & 0x80) ? 180000UL : 24000000UL;

	reg &= 0x7f;
	/* This should not happen but anyway... */
	if (reg == 0)
		reg++;
	return clock / (reg << 8);
}
static inline u8 pwm_freq_to_reg(unsigned long val)
{
	/* Minimum divider value is 0x01 and maximum is 0x7F */
	if (val >= 93750)	/* The highest we can do */
		return 0x01;
	if (val >= 720)	/* Use 24 MHz clock */
		return 24000000UL / (val << 8);
	if (val < 6)		/* The lowest we can do */
		return 0xFF;
	else			/* Use 180 kHz clock */
		return 0x80 | (180000UL / (val << 8));
}

#define BEEP_MASK_FROM_REG(val)		((val) & 0xff7fff)
#define BEEP_MASK_TO_REG(val)		((val) & 0xff7fff)

#define DIV_FROM_REG(val) (1 << (val))

/* Fan divisor (1..128) -> 3-bit log2 register encoding */
static inline u8 DIV_TO_REG(long val)
{
	int i;
	val = SENSORS_LIMIT(val, 1, 128) >> 1;
	for (i = 0; i < 7; i++) {
		if (val == 0)
			break;
		val >>= 1;
	}
	return (u8)i;
}

/*
 * For each registered chip, we need to keep some data in memory.
 * The structure is dynamically allocated.
 */
struct w83627hf_data {
	unsigned short addr;	/* ISA base of the sensor register window */
	const char *name;
	struct device *hwmon_dev;
	struct mutex lock;	/* serializes raw register access */
	enum chips type;

	struct mutex update_lock;	/* protects the cached values below */
	char valid;		/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */

	u8 in[9];		/* Register value */
	u8 in_max[9];		/* Register value */
	u8 in_min[9];		/* Register value */
	u8 fan[3];		/* Register value */
	u8 fan_min[3];		/* Register value */
	u16 temp[3];		/* Register value */
	u16 temp_max[3];	/* Register value */
	u16 temp_max_hyst[3];	/* Register value */
	u8 fan_div[3];		/* Register encoding, shifted right */
	u8 vid;			/* Register encoding, combined */
	u32 alarms;		/* Register encoding, combined */
	u32 beep_mask;		/* Register encoding, combined */
	u8 pwm[3];		/* Register value */
	u8 pwm_enable[3];	/* 1 = manual
				 * 2 = thermal cruise (also called SmartFan I)
				 * 3 = fan speed cruise */
	u8 pwm_freq[3];		/* Register value */
	u16 sens[3];		/* 1 = pentium diode; 2 = 3904 diode;
				 * 4 = thermistor */
	u8 vrm;
	u8 vrm_ovt;		/* Register value, 627THF/637HF/687THF only */
};

/* Forward declarations (bodies appear later in the file) */
static int w83627hf_probe(struct platform_device *pdev);
static int __devexit w83627hf_remove(struct platform_device *pdev);

static int w83627hf_read_value(struct w83627hf_data *data, u16 reg);
static int w83627hf_write_value(struct w83627hf_data *data, u16 reg,
				u16 value);
static void w83627hf_update_fan_div(struct w83627hf_data *data);
static struct w83627hf_data *w83627hf_update_device(struct device *dev);
static void w83627hf_init_device(struct platform_device *pdev);

static struct platform_driver w83627hf_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= DRVNAME,
	},
	.probe		= w83627hf_probe,
	.remove		= __devexit_p(w83627hf_remove),
};

/* sysfs: inN_input — cached voltage reading in mV */
static ssize_t
show_in_input(struct device *dev, struct device_attribute *devattr,
	      char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
}

/* sysfs: inN_min — lower voltage limit in mV */
static ssize_t
show_in_min(struct device *dev,
 struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
}

/* sysfs: inN_max — upper voltage limit in mV */
static ssize_t
show_in_max(struct device *dev, struct device_attribute *devattr,
	    char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
}

/* Store a new lower voltage limit (mV) and write it to the chip */
static ssize_t
store_in_min(struct device *dev, struct device_attribute *devattr,
	     const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_min[nr] = IN_TO_REG(val);
	w83627hf_write_value(data, W83781D_REG_IN_MIN(nr),
			     data->in_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Store a new upper voltage limit (mV) and write it to the chip */
static ssize_t
store_in_max(struct device *dev, struct device_attribute *devattr,
	     const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_max[nr] = IN_TO_REG(val);
	w83627hf_write_value(data, W83781D_REG_IN_MAX(nr),
			     data->in_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare input/min/max attributes for one voltage channel */
#define sysfs_vin_decl(offset) \
static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO,		\
			  show_in_input, NULL, offset);		\
static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO|S_IWUSR,	\
			  show_in_min, store_in_min, offset);	\
static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO|S_IWUSR,	\
			  show_in_max, store_in_max, offset);

sysfs_vin_decl(1);
sysfs_vin_decl(2);
sysfs_vin_decl(3);
sysfs_vin_decl(4);
sysfs_vin_decl(5);
sysfs_vin_decl(6);
sysfs_vin_decl(7);
sysfs_vin_decl(8);

/* use a different set of functions for in0 */
static ssize_t show_in_0(struct w83627hf_data *data, char
 *buf, u8 reg)
{
	long in0;

	/* in0 scaling depends on whether the chip is in VRM9 mode */
	if ((data->vrm_ovt & 0x01) &&
		(w83627thf == data->type || w83637hf == data->type
		 || w83687thf == data->type))

		/* use VRM9 calculation */
		in0 = (long)((reg * 488 + 70000 + 50) / 100);
	else
		/* use VRM8 (standard) calculation */
		in0 = (long)IN_FROM_REG(reg);

	return sprintf(buf,"%ld\n", in0);
}

static ssize_t show_regs_in_0(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return show_in_0(data, buf, data->in[0]);
}

static ssize_t show_regs_in_min0(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return show_in_0(data, buf, data->in_min[0]);
}

static ssize_t show_regs_in_max0(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return show_in_0(data, buf, data->in_max[0]);
}

/* Store in0_min, converting from mV with the mode-dependent scale */
static ssize_t store_regs_in_min0(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	if ((data->vrm_ovt & 0x01) &&
		(w83627thf == data->type || w83637hf == data->type
		 || w83687thf == data->type))

		/* use VRM9 calculation */
		data->in_min[0] =
			SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
					255);
	else
		/* use VRM8 (standard) calculation */
		data->in_min[0] = IN_TO_REG(val);

	w83627hf_write_value(data, W83781D_REG_IN_MIN(0), data->in_min[0]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Store in0_max, converting from mV with the mode-dependent scale */
static ssize_t store_regs_in_max0(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	if ((data->vrm_ovt & 0x01) &&
		(w83627thf == data->type || w83637hf == data->type
		 || w83687thf == data->type))
		/* use VRM9 calculation */
		data->in_max[0] =
			SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
					255);
	else
		/* use VRM8 (standard) calculation */
		data->in_max[0] = IN_TO_REG(val);

	w83627hf_write_value(data, W83781D_REG_IN_MAX(0), data->in_max[0]);
	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(in0_input, S_IRUGO, show_regs_in_0, NULL);
static DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR,
	show_regs_in_min0, store_regs_in_min0);
static DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR,
	show_regs_in_max0, store_regs_in_max0);

/* sysfs: fanN_input — RPM derived from tach count and divisor */
static ssize_t
show_fan_input(struct device *dev, struct device_attribute *devattr,
	       char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan[nr],
				(long)DIV_FROM_REG(data->fan_div[nr])));
}

/* sysfs: fanN_min — minimum RPM before the alarm triggers */
static ssize_t
show_fan_min(struct device *dev, struct device_attribute *devattr,
	     char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan_min[nr],
				(long)DIV_FROM_REG(data->fan_div[nr])));
}

/* Store a new fan minimum (RPM), encoded with the current divisor */
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *devattr,
	      const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = FAN_TO_REG(val,
				       DIV_FROM_REG(data->fan_div[nr]));
	w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr),
			     data->fan_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare input/min attributes for one fan (index is zero-based) */
#define sysfs_fan_decl(offset)	\
static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO,		\
			  show_fan_input, NULL, offset - 1);	\
static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR,	\
			  show_fan_min, store_fan_min, offset - 1);

sysfs_fan_decl(1);
sysfs_fan_decl(2);
sysfs_fan_decl(3);

/* sysfs: tempN_input — temp1 is 1C/bit, temp2/3 are LM75-format words */
static ssize_t
show_temp(struct device *dev, struct
 device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	u16 tmp = data->temp[nr];
	return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
				  : (long) TEMP_FROM_REG(tmp));
}

/* sysfs: tempN_max — high temperature limit */
static ssize_t
show_temp_max(struct device *dev, struct device_attribute *devattr,
	      char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	u16 tmp = data->temp_max[nr];
	return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
				  : (long) TEMP_FROM_REG(tmp));
}

/* sysfs: tempN_max_hyst — hysteresis for the high limit */
static ssize_t
show_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
		   char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = w83627hf_update_device(dev);

	u16 tmp = data->temp_max_hyst[nr];
	return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
				  : (long) TEMP_FROM_REG(tmp));
}

/* Store a new high limit, in the channel's native register format */
static ssize_t
store_temp_max(struct device *dev, struct device_attribute *devattr,
	       const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	u16 tmp;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
	mutex_lock(&data->update_lock);
	data->temp_max[nr] = tmp;
	w83627hf_write_value(data, w83627hf_reg_temp_over[nr], tmp);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Store a new hysteresis value, in the channel's native register format */
static ssize_t
store_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
		    const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct w83627hf_data *data = dev_get_drvdata(dev);
	u16 tmp;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	tmp = (nr) ?
 LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
	mutex_lock(&data->update_lock);
	data->temp_max_hyst[nr] = tmp;
	w83627hf_write_value(data, w83627hf_reg_temp_hyst[nr], tmp);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare input/max/max_hyst attributes for one temp channel */
#define sysfs_temp_decl(offset) \
static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO,		\
			  show_temp, NULL, offset - 1);			\
static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO|S_IWUSR,		\
			  show_temp_max, store_temp_max, offset - 1);	\
static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO|S_IWUSR,	\
			  show_temp_max_hyst, store_temp_max_hyst, offset - 1);

sysfs_temp_decl(1);
sysfs_temp_decl(2);
sysfs_temp_decl(3);

/* sysfs: cpu0_vid — CPU core voltage decoded from the cached VID bits */
static ssize_t
show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);

/* sysfs: vrm — VRM version used for VID decoding (user-overridable) */
static ssize_t
show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%ld\n", (long) data->vrm);
}
static ssize_t
store_vrm_reg(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;
	data->vrm = val;

	return count;
}
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);

/* sysfs: alarms — the combined 24-bit alarm word */
static ssize_t
show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n", (long) data->alarms);
}
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);

/* sysfs: per-channel alarm bit, selected by the attribute index */
static ssize_t
show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	int bitnr = to_sensor_dev_attr(attr)->index;
	return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
static
 SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16);
static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);

/* sysfs: beep_mask — the combined 24-bit beep enable word */
static ssize_t
show_beep_mask(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct w83627hf_data *data = w83627hf_update_device(dev);
	return sprintf(buf, "%ld\n",
		      (long)BEEP_MASK_FROM_REG(data->beep_mask));
}

/* Store the beep mask, spreading it over the three INTS registers */
static ssize_t
store_beep_mask(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct w83627hf_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	/* preserve beep enable */
	data->beep_mask = (data->beep_mask & 0x8000)
			| BEEP_MASK_TO_REG(val);
	w83627hf_write_value(data, W83781D_REG_BEEP_INTS1,
			    data->beep_mask & 0xff);
	w83627hf_write_value(data, W83781D_REG_BEEP_INTS3,
			    ((data->beep_mask) >> 16) & 0xff);
	w83627hf_write_value(data, W83781D_REG_BEEP_INTS2,
			    (data->beep_mask >> 8) & 0xff);

	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
		   show_beep_mask,
store_beep_mask); static ssize_t show_beep(struct device *dev, struct device_attribute *attr, char *buf) { struct w83627hf_data *data = w83627hf_update_device(dev); int bitnr = to_sensor_dev_attr(attr)->index; return sprintf(buf, "%u\n", (data->beep_mask >> bitnr) & 1); } static ssize_t store_beep(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627hf_data *data = dev_get_drvdata(dev); int bitnr = to_sensor_dev_attr(attr)->index; u8 reg; unsigned long bit; int err; err = kstrtoul(buf, 10, &bit); if (err) return err; if (bit & ~1) return -EINVAL; mutex_lock(&data->update_lock); if (bit) data->beep_mask |= (1 << bitnr); else data->beep_mask &= ~(1 << bitnr); if (bitnr < 8) { reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS1); if (bit) reg |= (1 << bitnr); else reg &= ~(1 << bitnr); w83627hf_write_value(data, W83781D_REG_BEEP_INTS1, reg); } else if (bitnr < 16) { reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2); if (bit) reg |= (1 << (bitnr - 8)); else reg &= ~(1 << (bitnr - 8)); w83627hf_write_value(data, W83781D_REG_BEEP_INTS2, reg); } else { reg = w83627hf_read_value(data, W83781D_REG_BEEP_INTS3); if (bit) reg |= (1 << (bitnr - 16)); else reg &= ~(1 << (bitnr - 16)); w83627hf_write_value(data, W83781D_REG_BEEP_INTS3, reg); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 0); static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 1); static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 2); static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 3); static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 8); static SENSOR_DEVICE_ATTR(in5_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 9); static SENSOR_DEVICE_ATTR(in6_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 10); static SENSOR_DEVICE_ATTR(in7_beep, S_IRUGO | S_IWUSR, show_beep, 
store_beep, 16); static SENSOR_DEVICE_ATTR(in8_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 17); static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 6); static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 7); static SENSOR_DEVICE_ATTR(fan3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 11); static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 4); static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 5); static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep, 13); static SENSOR_DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR, show_beep, store_beep, 15); static ssize_t show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) DIV_FROM_REG(data->fan_div[nr])); } /* * Note: we save and restore the fan minimum here, because its value is * determined in part by the fan divisor. This follows the principle of * least surprise; the user doesn't expect the fan minimum to change just * because the divisor changed. */ static ssize_t store_fan_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long min; u8 reg; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); /* Save fan_min */ min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); data->fan_div[nr] = DIV_TO_REG(val); reg = (w83627hf_read_value(data, nr==2 ? W83781D_REG_PIN : W83781D_REG_VID_FANDIV) & (nr==0 ? 0xcf : 0x3f)) | ((data->fan_div[nr] & 0x03) << (nr==0 ? 4 : 6)); w83627hf_write_value(data, nr==2 ? 
W83781D_REG_PIN : W83781D_REG_VID_FANDIV, reg); reg = (w83627hf_read_value(data, W83781D_REG_VBAT) & ~(1 << (5 + nr))) | ((data->fan_div[nr] & 0x04) << (3 + nr)); w83627hf_write_value(data, W83781D_REG_VBAT, reg); /* Restore fan_min */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO|S_IWUSR, show_fan_div, store_fan_div, 0); static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO|S_IWUSR, show_fan_div, store_fan_div, 1); static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO|S_IWUSR, show_fan_div, store_fan_div, 2); static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) data->pwm[nr]); } static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); if (data->type == w83627thf) { /* bits 0-3 are reserved in 627THF */ data->pwm[nr] = PWM_TO_REG(val) & 0xf0; w83627hf_write_value(data, W836X7HF_REG_PWM(data->type, nr), data->pwm[nr] | (w83627hf_read_value(data, W836X7HF_REG_PWM(data->type, nr)) & 0x0f)); } else { data->pwm[nr] = PWM_TO_REG(val); w83627hf_write_value(data, W836X7HF_REG_PWM(data->type, nr), data->pwm[nr]); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 1); static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2); static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *devattr, char 
*buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%d\n", data->pwm_enable[nr]); } static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); u8 reg; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; if (!val || val > 3) /* modes 1, 2 and 3 are supported */ return -EINVAL; mutex_lock(&data->update_lock); data->pwm_enable[nr] = val; reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]); reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]); reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr]; w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 0); static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 1); static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable, store_pwm_enable, 2); static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); if (data->type == w83627hf) return sprintf(buf, "%ld\n", pwm_freq_from_reg_627hf(data->pwm_freq[nr])); else return sprintf(buf, "%ld\n", pwm_freq_from_reg(data->pwm_freq[nr])); } static ssize_t store_pwm_freq(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); static const u8 mask[]={0xF8, 0x8F}; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); if (data->type == w83627hf) { data->pwm_freq[nr] = pwm_freq_to_reg_627hf(val); 
w83627hf_write_value(data, W83627HF_REG_PWM_FREQ, (data->pwm_freq[nr] << (nr*4)) | (w83627hf_read_value(data, W83627HF_REG_PWM_FREQ) & mask[nr])); } else { data->pwm_freq[nr] = pwm_freq_to_reg(val); w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr], data->pwm_freq[nr]); } mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO|S_IWUSR, show_pwm_freq, store_pwm_freq, 0); static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO|S_IWUSR, show_pwm_freq, store_pwm_freq, 1); static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO|S_IWUSR, show_pwm_freq, store_pwm_freq, 2); static ssize_t show_temp_type(struct device *dev, struct device_attribute *devattr, char *buf) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = w83627hf_update_device(dev); return sprintf(buf, "%ld\n", (long) data->sens[nr]); } static ssize_t store_temp_type(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int nr = to_sensor_dev_attr(devattr)->index; struct w83627hf_data *data = dev_get_drvdata(dev); unsigned long val; u32 tmp; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); switch (val) { case 1: /* PII/Celeron diode */ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, tmp | BIT_SCFG1[nr]); tmp = w83627hf_read_value(data, W83781D_REG_SCFG2); w83627hf_write_value(data, W83781D_REG_SCFG2, tmp | BIT_SCFG2[nr]); data->sens[nr] = val; break; case 2: /* 3904 */ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, tmp | BIT_SCFG1[nr]); tmp = w83627hf_read_value(data, W83781D_REG_SCFG2); w83627hf_write_value(data, W83781D_REG_SCFG2, tmp & ~BIT_SCFG2[nr]); data->sens[nr] = val; break; case W83781D_DEFAULT_BETA: dev_warn(dev, "Sensor type %d is deprecated, please use 4 " "instead\n", W83781D_DEFAULT_BETA); /* fall through */ case 4: /* thermistor */ tmp = w83627hf_read_value(data, 
W83781D_REG_SCFG1); w83627hf_write_value(data, W83781D_REG_SCFG1, tmp & ~BIT_SCFG1[nr]); data->sens[nr] = val; break; default: dev_err(dev, "Invalid sensor type %ld; must be 1, 2, or 4\n", (long) val); break; } mutex_unlock(&data->update_lock); return count; } #define sysfs_temp_type(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \ show_temp_type, store_temp_type, offset - 1); sysfs_temp_type(1); sysfs_temp_type(2); sysfs_temp_type(3); static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct w83627hf_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static int __init w83627hf_find(int sioaddr, unsigned short *addr, struct w83627hf_sio_data *sio_data) { int err = -ENODEV; u16 val; static const __initdata char *names[] = { "W83627HF", "W83627THF", "W83697HF", "W83637HF", "W83687THF", }; sio_data->sioaddr = sioaddr; superio_enter(sio_data); val = force_id ? 
force_id : superio_inb(sio_data, DEVID); switch (val) { case W627_DEVID: sio_data->type = w83627hf; break; case W627THF_DEVID: sio_data->type = w83627thf; break; case W697_DEVID: sio_data->type = w83697hf; break; case W637_DEVID: sio_data->type = w83637hf; break; case W687THF_DEVID: sio_data->type = w83687thf; break; case 0xff: /* No device at all */ goto exit; default: pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val); goto exit; } superio_select(sio_data, W83627HF_LD_HWM); val = (superio_inb(sio_data, WINB_BASE_REG) << 8) | superio_inb(sio_data, WINB_BASE_REG + 1); *addr = val & WINB_ALIGNMENT; if (*addr == 0) { pr_warn("Base address not set, skipping\n"); goto exit; } val = superio_inb(sio_data, WINB_ACT_REG); if (!(val & 0x01)) { pr_warn("Enabling HWM logical device\n"); superio_outb(sio_data, WINB_ACT_REG, val | 0x01); } err = 0; pr_info(DRVNAME ": Found %s chip at %#x\n", names[sio_data->type], *addr); exit: superio_exit(sio_data); return err; } #define VIN_UNIT_ATTRS(_X_) \ &sensor_dev_attr_in##_X_##_input.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_min.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_max.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_alarm.dev_attr.attr, \ &sensor_dev_attr_in##_X_##_beep.dev_attr.attr #define FAN_UNIT_ATTRS(_X_) \ &sensor_dev_attr_fan##_X_##_input.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_min.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_div.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_alarm.dev_attr.attr, \ &sensor_dev_attr_fan##_X_##_beep.dev_attr.attr #define TEMP_UNIT_ATTRS(_X_) \ &sensor_dev_attr_temp##_X_##_input.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_max.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_max_hyst.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_type.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_alarm.dev_attr.attr, \ &sensor_dev_attr_temp##_X_##_beep.dev_attr.attr static struct attribute *w83627hf_attributes[] = { &dev_attr_in0_input.attr, &dev_attr_in0_min.attr, &dev_attr_in0_max.attr, 
&sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in0_beep.dev_attr.attr, VIN_UNIT_ATTRS(2), VIN_UNIT_ATTRS(3), VIN_UNIT_ATTRS(4), VIN_UNIT_ATTRS(7), VIN_UNIT_ATTRS(8), FAN_UNIT_ATTRS(1), FAN_UNIT_ATTRS(2), TEMP_UNIT_ATTRS(1), TEMP_UNIT_ATTRS(2), &dev_attr_alarms.attr, &sensor_dev_attr_beep_enable.dev_attr.attr, &dev_attr_beep_mask.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group w83627hf_group = { .attrs = w83627hf_attributes, }; static struct attribute *w83627hf_attributes_opt[] = { VIN_UNIT_ATTRS(1), VIN_UNIT_ATTRS(5), VIN_UNIT_ATTRS(6), FAN_UNIT_ATTRS(3), TEMP_UNIT_ATTRS(3), &sensor_dev_attr_pwm3.dev_attr.attr, &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm3_freq.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm3_enable.dev_attr.attr, NULL }; static const struct attribute_group w83627hf_group_opt = { .attrs = w83627hf_attributes_opt, }; static int __devinit w83627hf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct w83627hf_sio_data *sio_data = dev->platform_data; struct w83627hf_data *data; struct resource *res; int err, i; static const char *names[] = { "w83627hf", "w83627thf", "w83697hf", "w83637hf", "w83687thf", }; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, WINB_REGION_SIZE, DRVNAME)) { dev_err(dev, "Failed to request region 0x%lx-0x%lx\n", (unsigned long)res->start, (unsigned long)(res->start + WINB_REGION_SIZE - 1)); err = -EBUSY; goto ERROR0; } data = kzalloc(sizeof(struct w83627hf_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto ERROR1; } data->addr = res->start; data->type = sio_data->type; data->name = names[sio_data->type]; mutex_init(&data->lock); mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); /* Initialize the chip */ 
w83627hf_init_device(pdev); /* A few vars need to be filled upon startup */ for (i = 0; i <= 2; i++) data->fan_min[i] = w83627hf_read_value( data, W83627HF_REG_FAN_MIN(i)); w83627hf_update_fan_div(data); /* Register common device attributes */ err = sysfs_create_group(&dev->kobj, &w83627hf_group); if (err) goto ERROR3; /* Register chip-specific device attributes */ if (data->type == w83627hf || data->type == w83697hf) if ((err = device_create_file(dev, &sensor_dev_attr_in5_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in5_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in6_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm1_freq.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm2_freq.dev_attr))) goto ERROR4; if (data->type != w83697hf) if ((err = device_create_file(dev, &sensor_dev_attr_in1_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_in1_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_min.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_div.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_fan3_alarm.dev_attr)) || (err = device_create_file(dev, 
&sensor_dev_attr_fan3_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_input.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_max.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_max_hyst.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_alarm.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_beep.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_temp3_type.dev_attr))) goto ERROR4; if (data->type != w83697hf && data->vid != 0xff) { /* Convert VID to voltage based on VRM */ data->vrm = vid_which_vrm(); if ((err = device_create_file(dev, &dev_attr_cpu0_vid)) || (err = device_create_file(dev, &dev_attr_vrm))) goto ERROR4; } if (data->type == w83627thf || data->type == w83637hf || data->type == w83687thf) { err = device_create_file(dev, &sensor_dev_attr_pwm3.dev_attr); if (err) goto ERROR4; } if (data->type == w83637hf || data->type == w83687thf) if ((err = device_create_file(dev, &sensor_dev_attr_pwm1_freq.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm2_freq.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm3_freq.dev_attr))) goto ERROR4; if (data->type != w83627hf) if ((err = device_create_file(dev, &sensor_dev_attr_pwm1_enable.dev_attr)) || (err = device_create_file(dev, &sensor_dev_attr_pwm2_enable.dev_attr))) goto ERROR4; if (data->type == w83627thf || data->type == w83637hf || data->type == w83687thf) { err = device_create_file(dev, &sensor_dev_attr_pwm3_enable.dev_attr); if (err) goto ERROR4; } data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto ERROR4; } return 0; ERROR4: sysfs_remove_group(&dev->kobj, &w83627hf_group); sysfs_remove_group(&dev->kobj, &w83627hf_group_opt); ERROR3: platform_set_drvdata(pdev, NULL); kfree(data); ERROR1: release_region(res->start, WINB_REGION_SIZE); ERROR0: return err; } static int __devexit w83627hf_remove(struct platform_device 
*pdev) { struct w83627hf_data *data = platform_get_drvdata(pdev); struct resource *res; hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group); sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group_opt); platform_set_drvdata(pdev, NULL); kfree(data); res = platform_get_resource(pdev, IORESOURCE_IO, 0); release_region(res->start, WINB_REGION_SIZE); return 0; } /* Registers 0x50-0x5f are banked */ static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg) { if ((reg & 0x00f0) == 0x50) { outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET); } } /* Not strictly necessary, but play it safe for now */ static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg) { if (reg & 0xff00) { outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); outb_p(0, data->addr + W83781D_DATA_REG_OFFSET); } } static int w83627hf_read_value(struct w83627hf_data *data, u16 reg) { int res, word_sized; mutex_lock(&data->lock); word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) && (((reg & 0x00ff) == 0x50) || ((reg & 0x00ff) == 0x53) || ((reg & 0x00ff) == 0x55)); w83627hf_set_bank(data, reg); outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET); res = inb_p(data->addr + W83781D_DATA_REG_OFFSET); if (word_sized) { outb_p((reg & 0xff) + 1, data->addr + W83781D_ADDR_REG_OFFSET); res = (res << 8) + inb_p(data->addr + W83781D_DATA_REG_OFFSET); } w83627hf_reset_bank(data, reg); mutex_unlock(&data->lock); return res; } static int __devinit w83627thf_read_gpio5(struct platform_device *pdev) { struct w83627hf_sio_data *sio_data = pdev->dev.platform_data; int res = 0xff, sel; superio_enter(sio_data); superio_select(sio_data, W83627HF_LD_GPIO5); /* Make sure these GPIO pins are enabled */ if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) { dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n"); goto exit; } /* * Make sure the pins 
are configured for input * There must be at least five (VRM 9), and possibly 6 (VRM 10) */ sel = superio_inb(sio_data, W83627THF_GPIO5_IOSR) & 0x3f; if ((sel & 0x1f) != 0x1f) { dev_dbg(&pdev->dev, "GPIO5 not configured for VID " "function\n"); goto exit; } dev_info(&pdev->dev, "Reading VID from GPIO5\n"); res = superio_inb(sio_data, W83627THF_GPIO5_DR) & sel; exit: superio_exit(sio_data); return res; } static int __devinit w83687thf_read_vid(struct platform_device *pdev) { struct w83627hf_sio_data *sio_data = pdev->dev.platform_data; int res = 0xff; superio_enter(sio_data); superio_select(sio_data, W83627HF_LD_HWM); /* Make sure these GPIO pins are enabled */ if (!(superio_inb(sio_data, W83687THF_VID_EN) & (1 << 2))) { dev_dbg(&pdev->dev, "VID disabled, no VID function\n"); goto exit; } /* Make sure the pins are configured for input */ if (!(superio_inb(sio_data, W83687THF_VID_CFG) & (1 << 4))) { dev_dbg(&pdev->dev, "VID configured as output, " "no VID function\n"); goto exit; } res = superio_inb(sio_data, W83687THF_VID_DATA) & 0x3f; exit: superio_exit(sio_data); return res; } static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value) { int word_sized; mutex_lock(&data->lock); word_sized = (((reg & 0xff00) == 0x100) || ((reg & 0xff00) == 0x200)) && (((reg & 0x00ff) == 0x53) || ((reg & 0x00ff) == 0x55)); w83627hf_set_bank(data, reg); outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET); if (word_sized) { outb_p(value >> 8, data->addr + W83781D_DATA_REG_OFFSET); outb_p((reg & 0xff) + 1, data->addr + W83781D_ADDR_REG_OFFSET); } outb_p(value & 0xff, data->addr + W83781D_DATA_REG_OFFSET); w83627hf_reset_bank(data, reg); mutex_unlock(&data->lock); return 0; } static void __devinit w83627hf_init_device(struct platform_device *pdev) { struct w83627hf_data *data = platform_get_drvdata(pdev); int i; enum chips type = data->type; u8 tmp; /* Minimize conflicts with other winbond i2c-only clients... */ /* disable i2c subclients... 
how to disable main i2c client?? */ /* force i2c address to relatively uncommon address */ w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89); w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c); /* Read VID only once */ if (type == w83627hf || type == w83637hf) { int lo = w83627hf_read_value(data, W83781D_REG_VID_FANDIV); int hi = w83627hf_read_value(data, W83781D_REG_CHIPID); data->vid = (lo & 0x0f) | ((hi & 0x01) << 4); } else if (type == w83627thf) { data->vid = w83627thf_read_gpio5(pdev); } else if (type == w83687thf) { data->vid = w83687thf_read_vid(pdev); } /* Read VRM & OVT Config only once */ if (type == w83627thf || type == w83637hf || type == w83687thf) { data->vrm_ovt = w83627hf_read_value(data, W83627THF_REG_VRM_OVT_CFG); } tmp = w83627hf_read_value(data, W83781D_REG_SCFG1); for (i = 1; i <= 3; i++) { if (!(tmp & BIT_SCFG1[i - 1])) { data->sens[i - 1] = 4; } else { if (w83627hf_read_value (data, W83781D_REG_SCFG2) & BIT_SCFG2[i - 1]) data->sens[i - 1] = 1; else data->sens[i - 1] = 2; } if ((type == w83697hf) && (i == 2)) break; } if(init) { /* Enable temp2 */ tmp = w83627hf_read_value(data, W83627HF_REG_TEMP2_CONFIG); if (tmp & 0x01) { dev_warn(&pdev->dev, "Enabling temp2, readings " "might not make sense\n"); w83627hf_write_value(data, W83627HF_REG_TEMP2_CONFIG, tmp & 0xfe); } /* Enable temp3 */ if (type != w83697hf) { tmp = w83627hf_read_value(data, W83627HF_REG_TEMP3_CONFIG); if (tmp & 0x01) { dev_warn(&pdev->dev, "Enabling temp3, " "readings might not make sense\n"); w83627hf_write_value(data, W83627HF_REG_TEMP3_CONFIG, tmp & 0xfe); } } } /* Start monitoring */ w83627hf_write_value(data, W83781D_REG_CONFIG, (w83627hf_read_value(data, W83781D_REG_CONFIG) & 0xf7) | 0x01); /* Enable VBAT monitoring if needed */ tmp = w83627hf_read_value(data, W83781D_REG_VBAT); if (!(tmp & 0x01)) w83627hf_write_value(data, W83781D_REG_VBAT, tmp | 0x01); } static void w83627hf_update_fan_div(struct w83627hf_data *data) { int reg; reg = 
w83627hf_read_value(data, W83781D_REG_VID_FANDIV); data->fan_div[0] = (reg >> 4) & 0x03; data->fan_div[1] = (reg >> 6) & 0x03; if (data->type != w83697hf) { data->fan_div[2] = (w83627hf_read_value(data, W83781D_REG_PIN) >> 6) & 0x03; } reg = w83627hf_read_value(data, W83781D_REG_VBAT); data->fan_div[0] |= (reg >> 3) & 0x04; data->fan_div[1] |= (reg >> 4) & 0x04; if (data->type != w83697hf) data->fan_div[2] |= (reg >> 5) & 0x04; } static struct w83627hf_data *w83627hf_update_device(struct device *dev) { struct w83627hf_data *data = dev_get_drvdata(dev); int i, num_temps = (data->type == w83697hf) ? 2 : 3; int num_pwms = (data->type == w83697hf) ? 2 : 3; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { for (i = 0; i <= 8; i++) { /* skip missing sensors */ if (((data->type == w83697hf) && (i == 1)) || ((data->type != w83627hf && data->type != w83697hf) && (i == 5 || i == 6))) continue; data->in[i] = w83627hf_read_value(data, W83781D_REG_IN(i)); data->in_min[i] = w83627hf_read_value(data, W83781D_REG_IN_MIN(i)); data->in_max[i] = w83627hf_read_value(data, W83781D_REG_IN_MAX(i)); } for (i = 0; i <= 2; i++) { data->fan[i] = w83627hf_read_value(data, W83627HF_REG_FAN(i)); data->fan_min[i] = w83627hf_read_value(data, W83627HF_REG_FAN_MIN(i)); } for (i = 0; i <= 2; i++) { u8 tmp = w83627hf_read_value(data, W836X7HF_REG_PWM(data->type, i)); /* bits 0-3 are reserved in 627THF */ if (data->type == w83627thf) tmp &= 0xf0; data->pwm[i] = tmp; if (i == 1 && (data->type == w83627hf || data->type == w83697hf)) break; } if (data->type == w83627hf) { u8 tmp = w83627hf_read_value(data, W83627HF_REG_PWM_FREQ); data->pwm_freq[0] = tmp & 0x07; data->pwm_freq[1] = (tmp >> 4) & 0x07; } else if (data->type != w83627thf) { for (i = 1; i <= 3; i++) { data->pwm_freq[i - 1] = w83627hf_read_value(data, W83637HF_REG_PWM_FREQ[i - 1]); if (i == 2 && (data->type == w83697hf)) break; } } if (data->type != w83627hf) { for (i = 0; i < 
num_pwms; i++) { u8 tmp = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[i]); data->pwm_enable[i] = ((tmp >> W83627THF_PWM_ENABLE_SHIFT[i]) & 0x03) + 1; } } for (i = 0; i < num_temps; i++) { data->temp[i] = w83627hf_read_value( data, w83627hf_reg_temp[i]); data->temp_max[i] = w83627hf_read_value( data, w83627hf_reg_temp_over[i]); data->temp_max_hyst[i] = w83627hf_read_value( data, w83627hf_reg_temp_hyst[i]); } w83627hf_update_fan_div(data); data->alarms = w83627hf_read_value(data, W83781D_REG_ALARM1) | (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) | (w83627hf_read_value(data, W83781D_REG_ALARM3) << 16); i = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2); data->beep_mask = (i << 8) | w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) | w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16; data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __init w83627hf_device_add(unsigned short address, const struct w83627hf_sio_data *sio_data) { struct resource res = { .start = address + WINB_REGION_OFFSET, .end = address + WINB_REGION_OFFSET + WINB_REGION_SIZE - 1, .name = DRVNAME, .flags = IORESOURCE_IO, }; int err; err = acpi_check_resource_conflict(&res); if (err) goto exit; pdev = platform_device_alloc(DRVNAME, address); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add_data(pdev, sio_data, sizeof(struct w83627hf_sio_data)); if (err) { pr_err("Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __init sensors_w83627hf_init(void) { int err; unsigned short address; struct w83627hf_sio_data sio_data; if 
	    (w83627hf_find(0x2e, &address, &sio_data) &&
	     w83627hf_find(0x4e, &address, &sio_data))
		return -ENODEV;

	err = platform_driver_register(&w83627hf_driver);
	if (err)
		goto exit;

	/* Sets global pdev as a side effect */
	err = w83627hf_device_add(address, &sio_data);
	if (err)
		goto exit_driver;

	return 0;

exit_driver:
	platform_driver_unregister(&w83627hf_driver);
exit:
	return err;
}

/* Module unload: remove the platform device, then unregister the driver */
static void __exit sensors_w83627hf_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&w83627hf_driver);
}

MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
	      "Philip Edelbrock <phil@netroedge.com>, "
	      "and Mark Studebaker <mdsxyz123@yahoo.com>");
MODULE_DESCRIPTION("W83627HF driver");
MODULE_LICENSE("GPL");

module_init(sensors_w83627hf_init);
module_exit(sensors_w83627hf_exit);
gpl-2.0
Hundsbuah/Note3_Samsung_Source_Drops
drivers/scsi/mac53c94.c
8431
15374
/* * SCSI low-level driver for the 53c94 SCSI bus adaptor found * on Power Macintosh computers, controlling the external SCSI chain. * We assume the 53c94 is connected to a DBDMA (descriptor-based DMA) * controller. * * Paul Mackerras, August 1996. * Copyright (C) 1996 Paul Mackerras. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/macio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "mac53c94.h" enum fsc_phase { idle, selecting, dataing, completing, busfreeing, }; struct fsc_state { struct mac53c94_regs __iomem *regs; int intr; struct dbdma_regs __iomem *dma; int dmaintr; int clk_freq; struct Scsi_Host *host; struct scsi_cmnd *request_q; struct scsi_cmnd *request_qtail; struct scsi_cmnd *current_req; /* req we're currently working on */ enum fsc_phase phase; /* what we're currently trying to do */ struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */ void *dma_cmd_space; struct pci_dev *pdev; dma_addr_t dma_addr; struct macio_dev *mdev; }; static void mac53c94_init(struct fsc_state *); static void mac53c94_start(struct fsc_state *); static void mac53c94_interrupt(int, void *); static irqreturn_t do_mac53c94_interrupt(int, void *); static void cmd_done(struct fsc_state *, int result); static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct fsc_state *state; #if 0 if (cmd->sc_data_direction == DMA_TO_DEVICE) { int i; printk(KERN_DEBUG "mac53c94_queue %p: command is", cmd); for (i = 0; i < cmd->cmd_len; ++i) 
printk(KERN_CONT " %.2x", cmd->cmnd[i]); printk(KERN_CONT "\n"); printk(KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd)); } #endif cmd->scsi_done = done; cmd->host_scribble = NULL; state = (struct fsc_state *) cmd->device->host->hostdata; if (state->request_q == NULL) state->request_q = cmd; else state->request_qtail->host_scribble = (void *) cmd; state->request_qtail = cmd; if (state->phase == idle) mac53c94_start(state); return 0; } static DEF_SCSI_QCMD(mac53c94_queue) static int mac53c94_host_reset(struct scsi_cmnd *cmd) { struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; unsigned long flags; spin_lock_irqsave(cmd->device->host->host_lock, flags); writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); writeb(CMD_SCSI_RESET, &regs->command); /* assert RST */ udelay(100); /* leave it on for a while (>= 25us) */ writeb(CMD_RESET, &regs->command); udelay(20); mac53c94_init(state); writeb(CMD_NOP, &regs->command); spin_unlock_irqrestore(cmd->device->host->host_lock, flags); return SUCCESS; } static void mac53c94_init(struct fsc_state *state) { struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; int x; writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1); writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */ writeb(CLKF_VAL(state->clk_freq), &regs->clk_factor); writeb(CF2_FEATURE_EN, &regs->config2); writeb(0, &regs->config3); writeb(0, &regs->sync_period); writeb(0, &regs->sync_offset); x = readb(&regs->interrupt); writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control); } /* * Start the next command for a 53C94. * Should be called with interrupts disabled. 
*/ static void mac53c94_start(struct fsc_state *state) { struct scsi_cmnd *cmd; struct mac53c94_regs __iomem *regs = state->regs; int i; if (state->phase != idle || state->current_req != NULL) panic("inappropriate mac53c94_start (state=%p)", state); if (state->request_q == NULL) return; state->current_req = cmd = state->request_q; state->request_q = (struct scsi_cmnd *) cmd->host_scribble; /* Off we go */ writeb(0, &regs->count_lo); writeb(0, &regs->count_mid); writeb(0, &regs->count_hi); writeb(CMD_NOP + CMD_DMA_MODE, &regs->command); udelay(1); writeb(CMD_FLUSH, &regs->command); udelay(1); writeb(cmd->device->id, &regs->dest_id); writeb(0, &regs->sync_period); writeb(0, &regs->sync_offset); /* load the command into the FIFO */ for (i = 0; i < cmd->cmd_len; ++i) writeb(cmd->cmnd[i], &regs->fifo); /* do select without ATN XXX */ writeb(CMD_SELECT, &regs->command); state->phase = selecting; set_dma_cmds(state, cmd); } static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id) { unsigned long flags; struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->device->host; spin_lock_irqsave(dev->host_lock, flags); mac53c94_interrupt(irq, dev_id); spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; } static void mac53c94_interrupt(int irq, void *dev_id) { struct fsc_state *state = (struct fsc_state *) dev_id; struct mac53c94_regs __iomem *regs = state->regs; struct dbdma_regs __iomem *dma = state->dma; struct scsi_cmnd *cmd = state->current_req; int nb, stat, seq, intr; static int mac53c94_errors; /* * Apparently, reading the interrupt register unlatches * the status and sequence step registers. 
*/ seq = readb(&regs->seqstep); stat = readb(&regs->status); intr = readb(&regs->interrupt); #if 0 printk(KERN_DEBUG "mac53c94_intr, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); #endif if (intr & INTR_RESET) { /* SCSI bus was reset */ printk(KERN_INFO "external SCSI bus reset detected\n"); writeb(CMD_NOP, &regs->command); writel(RUN << 16, &dma->control); /* stop dma */ cmd_done(state, DID_RESET << 16); return; } if (intr & INTR_ILL_CMD) { printk(KERN_ERR "53c94: invalid cmd, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); cmd_done(state, DID_ERROR << 16); return; } if (stat & STAT_ERROR) { #if 0 /* XXX these seem to be harmless? */ printk("53c94: bad error, intr=%x stat=%x seq=%x phase=%d\n", intr, stat, seq, state->phase); #endif ++mac53c94_errors; writeb(CMD_NOP + CMD_DMA_MODE, &regs->command); } if (cmd == 0) { printk(KERN_DEBUG "53c94: interrupt with no command active?\n"); return; } if (stat & STAT_PARITY) { printk(KERN_ERR "mac53c94: parity error\n"); cmd_done(state, DID_PARITY << 16); return; } switch (state->phase) { case selecting: if (intr & INTR_DISCONNECT) { /* selection timed out */ cmd_done(state, DID_BAD_TARGET << 16); return; } if (intr != INTR_BUS_SERV + INTR_DONE) { printk(KERN_DEBUG "got intr %x during selection\n", intr); cmd_done(state, DID_ERROR << 16); return; } if ((seq & SS_MASK) != SS_DONE) { printk(KERN_DEBUG "seq step %x after command\n", seq); cmd_done(state, DID_ERROR << 16); return; } writeb(CMD_NOP, &regs->command); /* set DMA controller going if any data to transfer */ if ((stat & (STAT_MSG|STAT_CD)) == 0 && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) { nb = cmd->SCp.this_residual; if (nb > 0xfff0) nb = 0xfff0; cmd->SCp.this_residual -= nb; writeb(nb, &regs->count_lo); writeb(nb >> 8, &regs->count_mid); writeb(CMD_DMA_MODE + CMD_NOP, &regs->command); writel(virt_to_phys(state->dma_cmds), &dma->cmdptr); writel((RUN << 16) | RUN, &dma->control); writeb(CMD_DMA_MODE + CMD_XFER_DATA, 
&regs->command); state->phase = dataing; break; } else if ((stat & STAT_PHASE) == STAT_CD + STAT_IO) { /* up to status phase already */ writeb(CMD_I_COMPLETE, &regs->command); state->phase = completing; } else { printk(KERN_DEBUG "in unexpected phase %x after cmd\n", stat & STAT_PHASE); cmd_done(state, DID_ERROR << 16); return; } break; case dataing: if (intr != INTR_BUS_SERV) { printk(KERN_DEBUG "got intr %x before status\n", intr); cmd_done(state, DID_ERROR << 16); return; } if (cmd->SCp.this_residual != 0 && (stat & (STAT_MSG|STAT_CD)) == 0) { /* Set up the count regs to transfer more */ nb = cmd->SCp.this_residual; if (nb > 0xfff0) nb = 0xfff0; cmd->SCp.this_residual -= nb; writeb(nb, &regs->count_lo); writeb(nb >> 8, &regs->count_mid); writeb(CMD_DMA_MODE + CMD_NOP, &regs->command); writeb(CMD_DMA_MODE + CMD_XFER_DATA, &regs->command); break; } if ((stat & STAT_PHASE) != STAT_CD + STAT_IO) { printk(KERN_DEBUG "intr %x before data xfer complete\n", intr); } writel(RUN << 16, &dma->control); /* stop dma */ scsi_dma_unmap(cmd); /* should check dma status */ writeb(CMD_I_COMPLETE, &regs->command); state->phase = completing; break; case completing: if (intr != INTR_DONE) { printk(KERN_DEBUG "got intr %x on completion\n", intr); cmd_done(state, DID_ERROR << 16); return; } cmd->SCp.Status = readb(&regs->fifo); cmd->SCp.Message = readb(&regs->fifo); cmd->result = CMD_ACCEPT_MSG; writeb(CMD_ACCEPT_MSG, &regs->command); state->phase = busfreeing; break; case busfreeing: if (intr != INTR_DISCONNECT) { printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr); } cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8) + cmd->SCp.Status); break; default: printk(KERN_DEBUG "don't know about phase %d\n", state->phase); } } static void cmd_done(struct fsc_state *state, int result) { struct scsi_cmnd *cmd; cmd = state->current_req; if (cmd != 0) { cmd->result = result; (*cmd->scsi_done)(cmd); state->current_req = NULL; } state->phase = idle; mac53c94_start(state); } /* 
* Set up DMA commands for transferring data. */ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) { int i, dma_cmd, total, nseg; struct scatterlist *scl; struct dbdma_cmd *dcmds; dma_addr_t dma_addr; u32 dma_len; nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (!nseg) return; dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ? OUTPUT_MORE : INPUT_MORE; dcmds = state->dma_cmds; total = 0; scsi_for_each_sg(cmd, scl, nseg, i) { dma_addr = sg_dma_address(scl); dma_len = sg_dma_len(scl); if (dma_len > 0xffff) panic("mac53c94: scatterlist element >= 64k"); total += dma_len; st_le16(&dcmds->req_count, dma_len); st_le16(&dcmds->command, dma_cmd); st_le32(&dcmds->phy_addr, dma_addr); dcmds->xfer_status = 0; ++dcmds; } dma_cmd += OUTPUT_LAST - OUTPUT_MORE; st_le16(&dcmds[-1].command, dma_cmd); st_le16(&dcmds->command, DBDMA_STOP); cmd->SCp.this_residual = total; } static struct scsi_host_template mac53c94_template = { .proc_name = "53c94", .name = "53C94", .queuecommand = mac53c94_queue, .eh_host_reset_handler = mac53c94_host_reset, .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match) { struct device_node *node = macio_get_of_node(mdev); struct pci_dev *pdev = macio_get_pci_dev(mdev); struct fsc_state *state; struct Scsi_Host *host; void *dma_cmd_space; const unsigned char *clkprop; int proplen, rc = -ENODEV; if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { printk(KERN_ERR "mac53c94: expected 2 addrs and intrs" " (got %d/%d)\n", macio_resource_count(mdev), macio_irq_count(mdev)); return -ENODEV; } if (macio_request_resources(mdev, "mac53c94") != 0) { printk(KERN_ERR "mac53c94: unable to request memory resources"); return -EBUSY; } host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state)); if (host == NULL) { printk(KERN_ERR "mac53c94: couldn't register host"); rc = -ENOMEM; goto 
out_release; } state = (struct fsc_state *) host->hostdata; macio_set_drvdata(mdev, state); state->host = host; state->pdev = pdev; state->mdev = mdev; state->regs = (struct mac53c94_regs __iomem *) ioremap(macio_resource_start(mdev, 0), 0x1000); state->intr = macio_irq(mdev, 0); state->dma = (struct dbdma_regs __iomem *) ioremap(macio_resource_start(mdev, 1), 0x1000); state->dmaintr = macio_irq(mdev, 1); if (state->regs == NULL || state->dma == NULL) { printk(KERN_ERR "mac53c94: ioremap failed for %s\n", node->full_name); goto out_free; } clkprop = of_get_property(node, "clock-frequency", &proplen); if (clkprop == NULL || proplen != sizeof(int)) { printk(KERN_ERR "%s: can't get clock frequency, " "assuming 25MHz\n", node->full_name); state->clk_freq = 25000000; } else state->clk_freq = *(int *)clkprop; /* Space for dma command list: +1 for stop command, * +1 to allow for aligning. * XXX FIXME: Use DMA consistent routines */ dma_cmd_space = kmalloc((host->sg_tablesize + 2) * sizeof(struct dbdma_cmd), GFP_KERNEL); if (dma_cmd_space == 0) { printk(KERN_ERR "mac53c94: couldn't allocate dma " "command space for %s\n", node->full_name); rc = -ENOMEM; goto out_free; } state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space); memset(state->dma_cmds, 0, (host->sg_tablesize + 1) * sizeof(struct dbdma_cmd)); state->dma_cmd_space = dma_cmd_space; mac53c94_init(state); if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) { printk(KERN_ERR "mac53C94: can't get irq %d for %s\n", state->intr, node->full_name); goto out_free_dma; } rc = scsi_add_host(host, &mdev->ofdev.dev); if (rc != 0) goto out_release_irq; scsi_scan_host(host); return 0; out_release_irq: free_irq(state->intr, state); out_free_dma: kfree(state->dma_cmd_space); out_free: if (state->dma != NULL) iounmap(state->dma); if (state->regs != NULL) iounmap(state->regs); scsi_host_put(host); out_release: macio_release_resources(mdev); return rc; } static int mac53c94_remove(struct macio_dev *mdev) 
{ struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev); struct Scsi_Host *host = fp->host; scsi_remove_host(host); free_irq(fp->intr, fp); if (fp->regs) iounmap(fp->regs); if (fp->dma) iounmap(fp->dma); kfree(fp->dma_cmd_space); scsi_host_put(host); macio_release_resources(mdev); return 0; } static struct of_device_id mac53c94_match[] = { { .name = "53c94", }, {}, }; MODULE_DEVICE_TABLE (of, mac53c94_match); static struct macio_driver mac53c94_driver = { .driver = { .name = "mac53c94", .owner = THIS_MODULE, .of_match_table = mac53c94_match, }, .probe = mac53c94_probe, .remove = mac53c94_remove, }; static int __init init_mac53c94(void) { return macio_register_driver(&mac53c94_driver); } static void __exit exit_mac53c94(void) { return macio_unregister_driver(&mac53c94_driver); } module_init(init_mac53c94); module_exit(exit_mac53c94); MODULE_DESCRIPTION("PowerMac 53c94 SCSI driver"); MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>"); MODULE_LICENSE("GPL");
gpl-2.0
curbthepain/us990_kernel
drivers/tty/serial/8250/8250_hp300.c
9199
7821
/* * Driver for the 98626/98644/internal serial interface on hp300/hp400 * (based on the National Semiconductor INS8250/NS16550AF/WD16C552 UARTs) * * Ported from 2.2 and modified to use the normal 8250 driver * by Kars de Jong <jongk@linux-m68k.org>, May 2004. */ #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> #include <linux/delay.h> #include <linux/dio.h> #include <linux/console.h> #include <linux/slab.h> #include <asm/io.h> #include "8250.h" #if !defined(CONFIG_HPDCA) && !defined(CONFIG_HPAPCI) #warning CONFIG_8250 defined but neither CONFIG_HPDCA nor CONFIG_HPAPCI defined, are you sure? #endif #ifdef CONFIG_HPAPCI struct hp300_port { struct hp300_port *next; /* next port */ int line; /* line (tty) number */ }; static struct hp300_port *hp300_ports; #endif #ifdef CONFIG_HPDCA static int __devinit hpdca_init_one(struct dio_dev *d, const struct dio_device_id *ent); static void __devexit hpdca_remove_one(struct dio_dev *d); static struct dio_device_id hpdca_dio_tbl[] = { { DIO_ID_DCA0 }, { DIO_ID_DCA0REM }, { DIO_ID_DCA1 }, { DIO_ID_DCA1REM }, { 0 } }; static struct dio_driver hpdca_driver = { .name = "hpdca", .id_table = hpdca_dio_tbl, .probe = hpdca_init_one, .remove = __devexit_p(hpdca_remove_one), }; #endif static unsigned int num_ports; extern int hp300_uart_scode; /* Offset to UART registers from base of DCA */ #define UART_OFFSET 17 #define DCA_ID 0x01 /* ID (read), reset (write) */ #define DCA_IC 0x03 /* Interrupt control */ /* Interrupt control */ #define DCA_IC_IE 0x80 /* Master interrupt enable */ #define HPDCA_BAUD_BASE 153600 /* Base address of the Frodo part */ #define FRODO_BASE (0x41c000) /* * Where we find the 8250-like APCI ports, and how far apart they are. 
*/ #define FRODO_APCIBASE 0x0 #define FRODO_APCISPACE 0x20 #define FRODO_APCI_OFFSET(x) (FRODO_APCIBASE + ((x) * FRODO_APCISPACE)) #define HPAPCI_BAUD_BASE 500400 #ifdef CONFIG_SERIAL_8250_CONSOLE /* * Parse the bootinfo to find descriptions for headless console and * debug serial ports and register them with the 8250 driver. * This function should be called before serial_console_init() is called * to make sure the serial console will be available for use. IA-64 kernel * calls this function from setup_arch() after the EFI and ACPI tables have * been parsed. */ int __init hp300_setup_serial_console(void) { int scode; struct uart_port port; memset(&port, 0, sizeof(port)); if (hp300_uart_scode < 0 || hp300_uart_scode > DIO_SCMAX) return 0; if (DIO_SCINHOLE(hp300_uart_scode)) return 0; scode = hp300_uart_scode; /* Memory mapped I/O */ port.iotype = UPIO_MEM; port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF; port.type = PORT_UNKNOWN; /* Check for APCI console */ if (scode == 256) { #ifdef CONFIG_HPAPCI printk(KERN_INFO "Serial console is HP APCI 1\n"); port.uartclk = HPAPCI_BAUD_BASE * 16; port.mapbase = (FRODO_BASE + FRODO_APCI_OFFSET(1)); port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE); port.regshift = 2; add_preferred_console("ttyS", port.line, "9600n8"); #else printk(KERN_WARNING "Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n"); return 0; #endif } else { #ifdef CONFIG_HPDCA unsigned long pa = dio_scodetophysaddr(scode); if (!pa) return 0; printk(KERN_INFO "Serial console is HP DCA at select code %d\n", scode); port.uartclk = HPDCA_BAUD_BASE * 16; port.mapbase = (pa + UART_OFFSET); port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE); port.regshift = 1; port.irq = DIO_IPL(pa + DIO_VIRADDRBASE); /* Enable board-interrupts */ out_8(pa + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE); if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80) add_preferred_console("ttyS", port.line, "9600n8"); #else printk(KERN_WARNING "Serial console is DCA but support 
is disabled (CONFIG_HPDCA)!\n"); return 0; #endif } if (early_serial_setup(&port) < 0) printk(KERN_WARNING "hp300_setup_serial_console(): early_serial_setup() failed.\n"); return 0; } #endif /* CONFIG_SERIAL_8250_CONSOLE */ #ifdef CONFIG_HPDCA static int __devinit hpdca_init_one(struct dio_dev *d, const struct dio_device_id *ent) { struct uart_port port; int line; #ifdef CONFIG_SERIAL_8250_CONSOLE if (hp300_uart_scode == d->scode) { /* Already got it. */ return 0; } #endif memset(&port, 0, sizeof(struct uart_port)); /* Memory mapped I/O */ port.iotype = UPIO_MEM; port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF; port.irq = d->ipl; port.uartclk = HPDCA_BAUD_BASE * 16; port.mapbase = (d->resource.start + UART_OFFSET); port.membase = (char *)(port.mapbase + DIO_VIRADDRBASE); port.regshift = 1; port.dev = &d->dev; line = serial8250_register_port(&port); if (line < 0) { printk(KERN_NOTICE "8250_hp300: register_serial() DCA scode %d" " irq %d failed\n", d->scode, port.irq); return -ENOMEM; } /* Enable board-interrupts */ out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE); dio_set_drvdata(d, (void *)line); /* Reset the DCA */ out_8(d->resource.start + DIO_VIRADDRBASE + DCA_ID, 0xff); udelay(100); num_ports++; return 0; } #endif static int __init hp300_8250_init(void) { static int called; #ifdef CONFIG_HPAPCI int line; unsigned long base; struct uart_port uport; struct hp300_port *port; int i; #endif if (called) return -ENODEV; called = 1; if (!MACH_IS_HP300) return -ENODEV; #ifdef CONFIG_HPDCA dio_register_driver(&hpdca_driver); #endif #ifdef CONFIG_HPAPCI if (hp300_model < HP_400) { if (!num_ports) return -ENODEV; return 0; } /* These models have the Frodo chip. * Port 0 is reserved for the Apollo Domain keyboard. * Port 1 is either the console or the DCA. */ for (i = 1; i < 4; i++) { /* Port 1 is the console on a 425e, on other machines it's * mapped to DCA. 
*/ #ifdef CONFIG_SERIAL_8250_CONSOLE if (i == 1) continue; #endif /* Create new serial device */ port = kmalloc(sizeof(struct hp300_port), GFP_KERNEL); if (!port) return -ENOMEM; memset(&uport, 0, sizeof(struct uart_port)); base = (FRODO_BASE + FRODO_APCI_OFFSET(i)); /* Memory mapped I/O */ uport.iotype = UPIO_MEM; uport.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ \ | UPF_BOOT_AUTOCONF; /* XXX - no interrupt support yet */ uport.irq = 0; uport.uartclk = HPAPCI_BAUD_BASE * 16; uport.mapbase = base; uport.membase = (char *)(base + DIO_VIRADDRBASE); uport.regshift = 2; line = serial8250_register_port(&uport); if (line < 0) { printk(KERN_NOTICE "8250_hp300: register_serial() APCI" " %d irq %d failed\n", i, uport.irq); kfree(port); continue; } port->line = line; port->next = hp300_ports; hp300_ports = port; num_ports++; } #endif /* Any boards found? */ if (!num_ports) return -ENODEV; return 0; } #ifdef CONFIG_HPDCA static void __devexit hpdca_remove_one(struct dio_dev *d) { int line; line = (int) dio_get_drvdata(d); if (d->resource.start) { /* Disable board-interrupts */ out_8(d->resource.start + DIO_VIRADDRBASE + DCA_IC, 0); } serial8250_unregister_port(line); } #endif static void __exit hp300_8250_exit(void) { #ifdef CONFIG_HPAPCI struct hp300_port *port, *to_free; for (port = hp300_ports; port; ) { serial8250_unregister_port(port->line); to_free = port; port = port->next; kfree(to_free); } hp300_ports = NULL; #endif #ifdef CONFIG_HPDCA dio_unregister_driver(&hpdca_driver); #endif } module_init(hp300_8250_init); module_exit(hp300_8250_exit); MODULE_DESCRIPTION("HP DCA/APCI serial driver"); MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>"); MODULE_LICENSE("GPL");
gpl-2.0
sakuraba001/android_kernel_samsung_js01lte
arch/arm/mach-w90x900/nuc910.c
13039
1298
/* * linux/arch/arm/mach-w90x900/nuc910.c * * Based on linux/arch/arm/plat-s3c24xx/s3c244x.c by Ben Dooks * * Copyright (c) 2009 Nuvoton corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * NUC910 cpu support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/platform_device.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include "cpu.h" #include "clock.h" /* define specific CPU platform device */ static struct platform_device *nuc910_dev[] __initdata = { &nuc900_device_ts, &nuc900_device_rtc, &nuc900_device_lcd, &nuc900_device_kpi, }; /* define specific CPU platform io map */ static struct map_desc nuc910evb_iodesc[] __initdata = { IODESC_ENT(USBEHCIHOST), IODESC_ENT(USBOHCIHOST), IODESC_ENT(KPI), IODESC_ENT(USBDEV), IODESC_ENT(ADC), }; /*Init NUC910 evb io*/ void __init nuc910_map_io(void) { nuc900_map_io(nuc910evb_iodesc, ARRAY_SIZE(nuc910evb_iodesc)); } /*Init NUC910 clock*/ void __init nuc910_init_clocks(void) { nuc900_init_clocks(); } /*Init NUC910 board info*/ void __init nuc910_board_init(void) { nuc900_board_init(nuc910_dev, ARRAY_SIZE(nuc910_dev)); }
gpl-2.0
gearslam/Ak-xGenesis-gee-SLIMPort
arch/arm/nwfpe/fpa11_cpdt.c
14831
9073
/* NetWinder Floating Point Emulator (c) Rebel.com, 1998-1999 (c) Philip Blundell, 1998, 2001 Direct questions, comments to Scott Bambrough <scottb@netwinder.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "fpa11.h" #include "softfloat.h" #include "fpopcode.h" #include "fpmodule.h" #include "fpmodule.inl" #include <linux/uaccess.h> static inline void loadSingle(const unsigned int Fn, const unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); fpa11->fType[Fn] = typeSingle; get_user(fpa11->fpreg[Fn].fSingle, pMem); } static inline void loadDouble(const unsigned int Fn, const unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); unsigned int *p; p = (unsigned int *) &fpa11->fpreg[Fn].fDouble; fpa11->fType[Fn] = typeDouble; #ifdef __ARMEB__ get_user(p[0], &pMem[0]); /* sign & exponent */ get_user(p[1], &pMem[1]); #else get_user(p[0], &pMem[1]); get_user(p[1], &pMem[0]); /* sign & exponent */ #endif } #ifdef CONFIG_FPE_NWFPE_XP static inline void loadExtended(const unsigned int Fn, const unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); unsigned int *p; p = (unsigned int *) &fpa11->fpreg[Fn].fExtended; fpa11->fType[Fn] = typeExtended; get_user(p[0], &pMem[0]); /* sign & exponent */ #ifdef __ARMEB__ get_user(p[1], &pMem[1]); /* ms bits */ get_user(p[2], &pMem[2]); /* ls bits */ #else get_user(p[1], &pMem[2]); /* ls bits */ get_user(p[2], 
&pMem[1]); /* ms bits */ #endif } #endif static inline void loadMultiple(const unsigned int Fn, const unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); register unsigned int *p; unsigned long x; p = (unsigned int *) &(fpa11->fpreg[Fn]); get_user(x, &pMem[0]); fpa11->fType[Fn] = (x >> 14) & 0x00000003; switch (fpa11->fType[Fn]) { case typeSingle: case typeDouble: { get_user(p[0], &pMem[2]); /* Single */ get_user(p[1], &pMem[1]); /* double msw */ p[2] = 0; /* empty */ } break; #ifdef CONFIG_FPE_NWFPE_XP case typeExtended: { get_user(p[1], &pMem[2]); get_user(p[2], &pMem[1]); /* msw */ p[0] = (x & 0x80003fff); } break; #endif } } static inline void storeSingle(struct roundingData *roundData, const unsigned int Fn, unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); union { float32 f; unsigned int i[1]; } val; switch (fpa11->fType[Fn]) { case typeDouble: val.f = float64_to_float32(roundData, fpa11->fpreg[Fn].fDouble); break; #ifdef CONFIG_FPE_NWFPE_XP case typeExtended: val.f = floatx80_to_float32(roundData, fpa11->fpreg[Fn].fExtended); break; #endif default: val.f = fpa11->fpreg[Fn].fSingle; } put_user(val.i[0], pMem); } static inline void storeDouble(struct roundingData *roundData, const unsigned int Fn, unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); union { float64 f; unsigned int i[2]; } val; switch (fpa11->fType[Fn]) { case typeSingle: val.f = float32_to_float64(fpa11->fpreg[Fn].fSingle); break; #ifdef CONFIG_FPE_NWFPE_XP case typeExtended: val.f = floatx80_to_float64(roundData, fpa11->fpreg[Fn].fExtended); break; #endif default: val.f = fpa11->fpreg[Fn].fDouble; } #ifdef __ARMEB__ put_user(val.i[0], &pMem[0]); /* msw */ put_user(val.i[1], &pMem[1]); /* lsw */ #else put_user(val.i[1], &pMem[0]); /* msw */ put_user(val.i[0], &pMem[1]); /* lsw */ #endif } #ifdef CONFIG_FPE_NWFPE_XP static inline void storeExtended(const unsigned int Fn, unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); union { floatx80 f; unsigned int i[3]; } val; switch 
(fpa11->fType[Fn]) { case typeSingle: val.f = float32_to_floatx80(fpa11->fpreg[Fn].fSingle); break; case typeDouble: val.f = float64_to_floatx80(fpa11->fpreg[Fn].fDouble); break; default: val.f = fpa11->fpreg[Fn].fExtended; } put_user(val.i[0], &pMem[0]); /* sign & exp */ #ifdef __ARMEB__ put_user(val.i[1], &pMem[1]); /* msw */ put_user(val.i[2], &pMem[2]); #else put_user(val.i[1], &pMem[2]); put_user(val.i[2], &pMem[1]); /* msw */ #endif } #endif static inline void storeMultiple(const unsigned int Fn, unsigned int __user *pMem) { FPA11 *fpa11 = GET_FPA11(); register unsigned int nType, *p; p = (unsigned int *) &(fpa11->fpreg[Fn]); nType = fpa11->fType[Fn]; switch (nType) { case typeSingle: case typeDouble: { put_user(p[0], &pMem[2]); /* single */ put_user(p[1], &pMem[1]); /* double msw */ put_user(nType << 14, &pMem[0]); } break; #ifdef CONFIG_FPE_NWFPE_XP case typeExtended: { put_user(p[2], &pMem[1]); /* msw */ put_user(p[1], &pMem[2]); put_user((p[0] & 0x80003fff) | (nType << 14), &pMem[0]); } break; #endif } } unsigned int PerformLDF(const unsigned int opcode) { unsigned int __user *pBase, *pAddress, *pFinal; unsigned int nRc = 1, write_back = WRITE_BACK(opcode); pBase = (unsigned int __user *) readRegister(getRn(opcode)); if (REG_PC == getRn(opcode)) { pBase += 2; write_back = 0; } pFinal = pBase; if (BIT_UP_SET(opcode)) pFinal += getOffset(opcode); else pFinal -= getOffset(opcode); if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; switch (opcode & MASK_TRANSFER_LENGTH) { case TRANSFER_SINGLE: loadSingle(getFd(opcode), pAddress); break; case TRANSFER_DOUBLE: loadDouble(getFd(opcode), pAddress); break; #ifdef CONFIG_FPE_NWFPE_XP case TRANSFER_EXTENDED: loadExtended(getFd(opcode), pAddress); break; #endif default: nRc = 0; } if (write_back) writeRegister(getRn(opcode), (unsigned long) pFinal); return nRc; } unsigned int PerformSTF(const unsigned int opcode) { unsigned int __user *pBase, *pAddress, *pFinal; unsigned int nRc = 1, write_back = 
WRITE_BACK(opcode); struct roundingData roundData; roundData.mode = SetRoundingMode(opcode); roundData.precision = SetRoundingPrecision(opcode); roundData.exception = 0; pBase = (unsigned int __user *) readRegister(getRn(opcode)); if (REG_PC == getRn(opcode)) { pBase += 2; write_back = 0; } pFinal = pBase; if (BIT_UP_SET(opcode)) pFinal += getOffset(opcode); else pFinal -= getOffset(opcode); if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; switch (opcode & MASK_TRANSFER_LENGTH) { case TRANSFER_SINGLE: storeSingle(&roundData, getFd(opcode), pAddress); break; case TRANSFER_DOUBLE: storeDouble(&roundData, getFd(opcode), pAddress); break; #ifdef CONFIG_FPE_NWFPE_XP case TRANSFER_EXTENDED: storeExtended(getFd(opcode), pAddress); break; #endif default: nRc = 0; } if (roundData.exception) float_raise(roundData.exception); if (write_back) writeRegister(getRn(opcode), (unsigned long) pFinal); return nRc; } unsigned int PerformLFM(const unsigned int opcode) { unsigned int __user *pBase, *pAddress, *pFinal; unsigned int i, Fd, write_back = WRITE_BACK(opcode); pBase = (unsigned int __user *) readRegister(getRn(opcode)); if (REG_PC == getRn(opcode)) { pBase += 2; write_back = 0; } pFinal = pBase; if (BIT_UP_SET(opcode)) pFinal += getOffset(opcode); else pFinal -= getOffset(opcode); if (PREINDEXED(opcode)) pAddress = pFinal; else pAddress = pBase; Fd = getFd(opcode); for (i = getRegisterCount(opcode); i > 0; i--) { loadMultiple(Fd, pAddress); pAddress += 3; Fd++; if (Fd == 8) Fd = 0; } if (write_back) writeRegister(getRn(opcode), (unsigned long) pFinal); return 1; } unsigned int PerformSFM(const unsigned int opcode) { unsigned int __user *pBase, *pAddress, *pFinal; unsigned int i, Fd, write_back = WRITE_BACK(opcode); pBase = (unsigned int __user *) readRegister(getRn(opcode)); if (REG_PC == getRn(opcode)) { pBase += 2; write_back = 0; } pFinal = pBase; if (BIT_UP_SET(opcode)) pFinal += getOffset(opcode); else pFinal -= getOffset(opcode); if (PREINDEXED(opcode)) 
pAddress = pFinal; else pAddress = pBase; Fd = getFd(opcode); for (i = getRegisterCount(opcode); i > 0; i--) { storeMultiple(Fd, pAddress); pAddress += 3; Fd++; if (Fd == 8) Fd = 0; } if (write_back) writeRegister(getRn(opcode), (unsigned long) pFinal); return 1; } unsigned int EmulateCPDT(const unsigned int opcode) { unsigned int nRc = 0; if (LDF_OP(opcode)) { nRc = PerformLDF(opcode); } else if (LFM_OP(opcode)) { nRc = PerformLFM(opcode); } else if (STF_OP(opcode)) { nRc = PerformSTF(opcode); } else if (SFM_OP(opcode)) { nRc = PerformSFM(opcode); } else { nRc = 0; } return nRc; }
gpl-2.0
imang/gcore_kernel
drivers/media/video/msm/mt9d113.c
240
15789
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <media/msm_camera.h> #include <mach/gpio.h> #include "mt9d113.h" /* Micron MT9D113 Registers and their values */ #define REG_MT9D113_MODEL_ID 0x0000 #define MT9D113_MODEL_ID 0x2580 #define Q8 0x00000100 struct mt9d113_work { struct work_struct work; }; static struct mt9d113_work *mt9d113_sensorw; static struct i2c_client *mt9d113_client; struct mt9d113_ctrl { const struct msm_camera_sensor_info *sensordata; uint32_t sensormode; uint32_t fps_divider;/* init to 1 * 0x00000400 */ uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */ uint16_t fps; uint16_t curr_step_pos; uint16_t my_reg_gain; uint32_t my_reg_line_count; uint16_t total_lines_per_frame; uint16_t config_csi; enum mt9d113_resolution_t prev_res; enum mt9d113_resolution_t pict_res; enum mt9d113_resolution_t curr_res; enum mt9d113_test_mode_t set_test; }; static struct mt9d113_ctrl *mt9d113_ctrl; static DECLARE_WAIT_QUEUE_HEAD(mt9d113_wait_queue); DEFINE_MUTEX(mt9d113_mut); static int mt9d113_i2c_rxdata(unsigned short saddr, unsigned char *rxdata, int length) { struct i2c_msg msgs[] = { { .addr = saddr, .flags = 0, .len = 2, .buf = rxdata, }, { .addr = saddr, .flags = I2C_M_RD, .len = length, .buf = rxdata, }, }; if (i2c_transfer(mt9d113_client->adapter, msgs, 2) < 0) { CDBG("mt9d113_i2c_rxdata failed!\n"); return 
-EIO; } return 0; } static int32_t mt9d113_i2c_read(unsigned short saddr, unsigned short raddr, unsigned short *rdata, enum mt9d113_width width) { int32_t rc = 0; unsigned char buf[4]; if (!rdata) return -EIO; memset(buf, 0, sizeof(buf)); switch (width) { case WORD_LEN: { buf[0] = (raddr & 0xFF00)>>8; buf[1] = (raddr & 0x00FF); rc = mt9d113_i2c_rxdata(saddr, buf, 2); if (rc < 0) return rc; *rdata = buf[0] << 8 | buf[1]; } break; default: break; } if (rc < 0) CDBG("mt9d113_i2c_read failed !\n"); return rc; } static int32_t mt9d113_i2c_txdata(unsigned short saddr, unsigned char *txdata, int length) { struct i2c_msg msg[] = { { .addr = saddr, .flags = 0, .len = length, .buf = txdata, }, }; if (i2c_transfer(mt9d113_client->adapter, msg, 1) < 0) { CDBG("mt9d113_i2c_txdata failed\n"); return -EIO; } return 0; } static int32_t mt9d113_i2c_write(unsigned short saddr, unsigned short waddr, unsigned short wdata, enum mt9d113_width width) { int32_t rc = -EIO; unsigned char buf[4]; memset(buf, 0, sizeof(buf)); switch (width) { case WORD_LEN: { buf[0] = (waddr & 0xFF00)>>8; buf[1] = (waddr & 0x00FF); buf[2] = (wdata & 0xFF00)>>8; buf[3] = (wdata & 0x00FF); rc = mt9d113_i2c_txdata(saddr, buf, 4); } break; case BYTE_LEN: { buf[0] = waddr; buf[1] = wdata; rc = mt9d113_i2c_txdata(saddr, buf, 2); } break; default: break; } if (rc < 0) printk(KERN_ERR "i2c_write failed, addr = 0x%x, val = 0x%x!\n", waddr, wdata); return rc; } static int32_t mt9d113_i2c_write_table( struct mt9d113_i2c_reg_conf const *reg_conf_tbl, int num_of_items_in_table) { int i; int32_t rc = -EIO; for (i = 0; i < num_of_items_in_table; i++) { rc = mt9d113_i2c_write(mt9d113_client->addr, reg_conf_tbl->waddr, reg_conf_tbl->wdata, WORD_LEN); if (rc < 0) break; reg_conf_tbl++; } return rc; } static long mt9d113_reg_init(void) { uint16_t data = 0; int32_t rc = 0; int count = 0; struct msm_camera_csi_params mt9d113_csi_params; if (!mt9d113_ctrl->config_csi) { mt9d113_csi_params.lane_cnt = 1; 
mt9d113_csi_params.data_format = CSI_8BIT; mt9d113_csi_params.lane_assign = 0xe4; mt9d113_csi_params.dpcm_scheme = 0; mt9d113_csi_params.settle_cnt = 0x14; rc = msm_camio_csi_config(&mt9d113_csi_params); mt9d113_ctrl->config_csi = 1; msleep(50); } /* Disable parallel and enable mipi*/ rc = mt9d113_i2c_write(mt9d113_client->addr, 0x001A, 0x0051, WORD_LEN); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x001A, 0x0050, WORD_LEN); msleep(20); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x001A, 0x0058, WORD_LEN); /* Preset pll settings begin*/ rc = mt9d113_i2c_write_table(&mt9d113_regs.pll_tbl[0], mt9d113_regs.pll_tbl_size); if (rc < 0) return rc; rc = mt9d113_i2c_read(mt9d113_client->addr, 0x0014, &data, WORD_LEN); data = data&0x8000; /* Poll*/ while (data == 0x0000) { data = 0; rc = mt9d113_i2c_read(mt9d113_client->addr, 0x0014, &data, WORD_LEN); data = data & 0x8000; usleep_range(11000, 12000); count++; if (count == 100) { CDBG(" Timeout:1\n"); break; } } rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0014, 0x20FA, WORD_LEN); /*Preset pll Ends*/ mt9d113_i2c_write(mt9d113_client->addr, 0x0018, 0x402D, WORD_LEN); mt9d113_i2c_write(mt9d113_client->addr, 0x0018, 0x402C, WORD_LEN); /*POLL_REG=0x0018,0x4000,!=0x0000,DELAY=10,TIMEOUT=100*/ data = 0; rc = mt9d113_i2c_read(mt9d113_client->addr, 0x0018, &data, WORD_LEN); data = data & 0x4000; count = 0; while (data != 0x0000) { rc = mt9d113_i2c_read(mt9d113_client->addr, 0x0018, &data, WORD_LEN); data = data & 0x4000; CDBG(" data is %d\n" , data); usleep_range(11000, 12000); count++; if (count == 100) { CDBG(" Loop2 timeout: MT9D113\n"); break; } CDBG(" Not streaming\n"); } CDBG("MT9D113: Start stream\n"); /*Preset Register Wizard Conf*/ rc = mt9d113_i2c_write_table(&mt9d113_regs.register_tbl[0], mt9d113_regs.register_tbl_size); if (rc < 0) return rc; rc = mt9d113_i2c_write_table(&mt9d113_regs.err_tbl[0], mt9d113_regs.err_tbl_size); if (rc < 0) return rc; rc = mt9d113_i2c_write_table(&mt9d113_regs.eeprom_tbl[0], 
mt9d113_regs.eeprom_tbl_size); if (rc < 0) return rc; rc = mt9d113_i2c_write_table(&mt9d113_regs.low_light_tbl[0], mt9d113_regs.low_light_tbl_size); if (rc < 0) return rc; rc = mt9d113_i2c_write_table(&mt9d113_regs.awb_tbl[0], mt9d113_regs.awb_tbl_size); if (rc < 0) return rc; rc = mt9d113_i2c_write_table(&mt9d113_regs.patch_tbl[0], mt9d113_regs.patch_tbl_size); if (rc < 0) return rc; /*check patch load*/ mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA024, WORD_LEN); count = 0; /*To check if patch is loaded properly poll the register 0x990 till the condition is met or till the timeout*/ data = 0; rc = mt9d113_i2c_read(mt9d113_client->addr, 0x0990, &data, WORD_LEN); while (data == 0) { data = 0; rc = mt9d113_i2c_read(mt9d113_client->addr, 0x0990, &data, WORD_LEN); usleep_range(11000, 12000); count++; if (count == 100) { CDBG("Timeout in patch loading\n"); break; } } /*BITFIELD=0x0018, 0x0004, 0*/ /*Preset continue begin */ rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0018, 0x0028, WORD_LEN); CDBG(" mt9d113 wait for seq done\n"); /* syncronize the FW with the sensor MCU_ADDRESS [SEQ_CMD]*/ rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0006, WORD_LEN); /*mt9d113 wait for seq done syncronize the FW with the sensor */ msleep(20); /*Preset continue end */ CDBG(" MT9D113: Preset continue end\n"); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0012, 0x00F5, WORD_LEN); /*continue begin */ CDBG(" MT9D113: Preset continue begin\n"); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0018, 0x0028 , WORD_LEN); /*mt9d113 wait for seq done syncronize the FW with the sensor MCU_ADDRESS [SEQ_CMD]*/ msleep(20); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); /* MCU DATA */ rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0006, WORD_LEN); /*mt9d113 wait for seq done syncronize the FW with the sensor */ /* MCU_ADDRESS [SEQ_CMD]*/ msleep(20); /*Preset continue end*/ return 
rc; } static long mt9d113_set_sensor_mode(int mode) { long rc = 0; switch (mode) { case SENSOR_PREVIEW_MODE: rc = mt9d113_reg_init(); CDBG("MT9D113: configure to preview begin\n"); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA115, WORD_LEN); if (rc < 0) return rc; rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0000, WORD_LEN); if (rc < 0) return rc; rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); if (rc < 0) return rc; rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0001, WORD_LEN); if (rc < 0) return rc; break; case SENSOR_SNAPSHOT_MODE: case SENSOR_RAW_SNAPSHOT_MODE: rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA115, WORD_LEN); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x098C, 0xA103, WORD_LEN); rc = mt9d113_i2c_write(mt9d113_client->addr, 0x0990, 0x0002, WORD_LEN); break; default: return -EINVAL; } return 0; } static int mt9d113_sensor_init_probe(const struct msm_camera_sensor_info * data) { uint16_t model_id = 0; int rc = 0; /* Read the Model ID of the sensor */ rc = mt9d113_i2c_read(mt9d113_client->addr, REG_MT9D113_MODEL_ID, &model_id, WORD_LEN); if (rc < 0) goto init_probe_fail; /* Check if it matches it with the value in Datasheet */ if (model_id != MT9D113_MODEL_ID) printk(KERN_INFO "mt9d113 model_id = 0x%x\n", model_id); if (rc < 0) goto init_probe_fail; return rc; init_probe_fail: printk(KERN_INFO "probe fail\n"); return rc; } static int mt9d113_init_client(struct i2c_client *client) { /* Initialize the MSM_CAMI2C Chip */ init_waitqueue_head(&mt9d113_wait_queue); return 0; } int mt9d113_sensor_config(void __user *argp) { struct sensor_cfg_data cfg_data; long rc = 0; if (copy_from_user(&cfg_data, (void *)argp, (sizeof(struct sensor_cfg_data)))) return -EFAULT; mutex_lock(&mt9d113_mut); CDBG("mt9d113_ioctl, cfgtype = %d, mode = %d\n", cfg_data.cfgtype, cfg_data.mode); switch (cfg_data.cfgtype) { case CFG_SET_MODE: rc = 
mt9d113_set_sensor_mode( cfg_data.mode); break; case CFG_SET_EFFECT: return rc; case CFG_GET_AF_MAX_STEPS: default: rc = -EINVAL; break; } mutex_unlock(&mt9d113_mut); return rc; } int mt9d113_sensor_release(void) { int rc = 0; mutex_lock(&mt9d113_mut); gpio_set_value_cansleep(mt9d113_ctrl->sensordata->sensor_reset, 0); msleep(20); gpio_free(mt9d113_ctrl->sensordata->sensor_reset); kfree(mt9d113_ctrl); mutex_unlock(&mt9d113_mut); return rc; } static int mt9d113_probe_init_done(const struct msm_camera_sensor_info *data) { gpio_free(data->sensor_reset); return 0; } static int mt9d113_probe_init_sensor(const struct msm_camera_sensor_info *data) { int32_t rc = 0; uint16_t chipid = 0; rc = gpio_request(data->sensor_reset, "mt9d113"); printk(KERN_INFO " mt9d113_probe_init_sensor\n"); if (!rc) { printk(KERN_INFO "sensor_reset = %d\n", rc); gpio_direction_output(data->sensor_reset, 0); usleep_range(11000, 12000); gpio_set_value_cansleep(data->sensor_reset, 1); usleep_range(11000, 12000); } else goto init_probe_done; printk(KERN_INFO " mt9d113_probe_init_sensor called\n"); rc = mt9d113_i2c_read(mt9d113_client->addr, REG_MT9D113_MODEL_ID, &chipid, WORD_LEN); if (rc < 0) goto init_probe_fail; /*Compare sensor ID to MT9D113 ID: */ if (chipid != MT9D113_MODEL_ID) { printk(KERN_INFO "mt9d113_probe_init_sensor chip id is%d\n", chipid); } CDBG("mt9d113_probe_init_sensor Success\n"); goto init_probe_done; init_probe_fail: CDBG(" ov2720_probe_init_sensor fails\n"); gpio_set_value_cansleep(data->sensor_reset, 0); mt9d113_probe_init_done(data); init_probe_done: printk(KERN_INFO " mt9d113_probe_init_sensor finishes\n"); return rc; } static int mt9d113_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { rc = -ENOTSUPP; goto probe_failure; } mt9d113_sensorw = kzalloc(sizeof(struct mt9d113_work), GFP_KERNEL); if (!mt9d113_sensorw) { rc = -ENOMEM; goto probe_failure; } i2c_set_clientdata(client, 
mt9d113_sensorw); mt9d113_init_client(client); mt9d113_client = client; CDBG("mt9d113_probe succeeded!\n"); return 0; probe_failure: kfree(mt9d113_sensorw); mt9d113_sensorw = NULL; CDBG("mt9d113_probe failed!\n"); return rc; } static const struct i2c_device_id mt9d113_i2c_id[] = { { "mt9d113", 0}, {}, }; static struct i2c_driver mt9d113_i2c_driver = { .id_table = mt9d113_i2c_id, .probe = mt9d113_i2c_probe, .remove = __exit_p(mt9d113_i2c_remove), .driver = { .name = "mt9d113", }, }; int mt9d113_sensor_open_init(const struct msm_camera_sensor_info *data) { int32_t rc = 0; mt9d113_ctrl = kzalloc(sizeof(struct mt9d113_ctrl), GFP_KERNEL); if (!mt9d113_ctrl) { printk(KERN_INFO "mt9d113_init failed!\n"); rc = -ENOMEM; goto init_done; } mt9d113_ctrl->fps_divider = 1 * 0x00000400; mt9d113_ctrl->pict_fps_divider = 1 * 0x00000400; mt9d113_ctrl->set_test = TEST_OFF; mt9d113_ctrl->config_csi = 0; mt9d113_ctrl->prev_res = QTR_SIZE; mt9d113_ctrl->pict_res = FULL_SIZE; mt9d113_ctrl->curr_res = INVALID_SIZE; if (data) mt9d113_ctrl->sensordata = data; if (rc < 0) { printk(KERN_INFO "mt9d113_sensor_open_init fail\n"); return rc; } /* enable mclk first */ msm_camio_clk_rate_set(24000000); msleep(20); rc = mt9d113_probe_init_sensor(data); if (rc < 0) goto init_fail; mt9d113_ctrl->fps = 30*Q8; rc = mt9d113_sensor_init_probe(data); if (rc < 0) { gpio_set_value_cansleep(data->sensor_reset, 0); goto init_fail; } else printk(KERN_ERR "%s: %d\n", __func__, __LINE__); goto init_done; init_fail: printk(KERN_INFO "init_fail\n"); mt9d113_probe_init_done(data); init_done: CDBG("init_done\n"); return rc; } static int mt9d113_sensor_probe(const struct msm_camera_sensor_info *info, struct msm_sensor_ctrl *s) { int rc = 0; rc = i2c_add_driver(&mt9d113_i2c_driver); if (rc < 0 || mt9d113_client == NULL) { rc = -ENOTSUPP; goto probe_fail; } msm_camio_clk_rate_set(24000000); usleep_range(5000, 6000); rc = mt9d113_probe_init_sensor(info); if (rc < 0) goto probe_fail; s->s_init = mt9d113_sensor_open_init; 
s->s_release = mt9d113_sensor_release; s->s_config = mt9d113_sensor_config; s->s_camera_type = FRONT_CAMERA_2D; s->s_mount_angle = 0; gpio_set_value_cansleep(info->sensor_reset, 0); mt9d113_probe_init_done(info); return rc; probe_fail: printk(KERN_INFO "mt9d113_sensor_probe: SENSOR PROBE FAILS!\n"); return rc; } static int __mt9d113_probe(struct platform_device *pdev) { return msm_camera_drv_start(pdev, mt9d113_sensor_probe); } static struct platform_driver msm_camera_driver = { .probe = __mt9d113_probe, .driver = { .name = "msm_cam_mt9d113", .owner = THIS_MODULE, }, }; static int __init mt9d113_init(void) { return platform_driver_register(&msm_camera_driver); } module_init(mt9d113_init); MODULE_DESCRIPTION("Micron 2MP YUV sensor driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
dpuyosa/android_kernel_wiko_l5460
drivers/macintosh/via-pmu-backlight.c
1776
4518
/* * Backlight code for via-pmu * * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi. * Copyright (C) 2001-2002 Benjamin Herrenschmidt * Copyright (C) 2006 Michael Hanselmann <linux-kernel@hansmi.ch> * */ #include <asm/ptrace.h> #include <linux/adb.h> #include <linux/pmu.h> #include <asm/backlight.h> #include <asm/prom.h> #define MAX_PMU_LEVEL 0xFF static const struct backlight_ops pmu_backlight_data; static DEFINE_SPINLOCK(pmu_backlight_lock); static int sleeping, uses_pmu_bl; static u8 bl_curve[FB_BACKLIGHT_LEVELS]; static void pmu_backlight_init_curve(u8 off, u8 min, u8 max) { int i, flat, count, range = (max - min); bl_curve[0] = off; for (flat = 1; flat < (FB_BACKLIGHT_LEVELS / 16); ++flat) bl_curve[flat] = min; count = FB_BACKLIGHT_LEVELS * 15 / 16; for (i = 0; i < count; ++i) bl_curve[flat + i] = min + (range * (i + 1) / count); } static int pmu_backlight_curve_lookup(int value) { int level = (FB_BACKLIGHT_LEVELS - 1); int i, max = 0; /* Look for biggest value */ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) max = max((int)bl_curve[i], max); /* Look for nearest value */ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) { int diff = abs(bl_curve[i] - value); if (diff < max) { max = diff; level = i; } } return level; } static int pmu_backlight_get_level_brightness(int level) { int pmulevel; /* Get and convert the value */ pmulevel = bl_curve[level] * FB_BACKLIGHT_MAX / MAX_PMU_LEVEL; if (pmulevel < 0) pmulevel = 0; else if (pmulevel > MAX_PMU_LEVEL) pmulevel = MAX_PMU_LEVEL; return pmulevel; } static int __pmu_backlight_update_status(struct backlight_device *bd) { struct adb_request req; int level = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK || bd->props.fb_blank != FB_BLANK_UNBLANK) level = 0; if (level > 0) { int pmulevel = pmu_backlight_get_level_brightness(level); pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel); pmu_wait_complete(&req); pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_BACKLIGHT | PMU_POW_ON); 
pmu_wait_complete(&req); } else { pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_BACKLIGHT | PMU_POW_OFF); pmu_wait_complete(&req); } return 0; } static int pmu_backlight_update_status(struct backlight_device *bd) { unsigned long flags; int rc = 0; spin_lock_irqsave(&pmu_backlight_lock, flags); /* Don't update brightness when sleeping */ if (!sleeping) rc = __pmu_backlight_update_status(bd); spin_unlock_irqrestore(&pmu_backlight_lock, flags); return rc; } static const struct backlight_ops pmu_backlight_data = { .update_status = pmu_backlight_update_status, }; #ifdef CONFIG_PM void pmu_backlight_set_sleep(int sleep) { unsigned long flags; spin_lock_irqsave(&pmu_backlight_lock, flags); sleeping = sleep; if (pmac_backlight && uses_pmu_bl) { if (sleep) { struct adb_request req; pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_BACKLIGHT | PMU_POW_OFF); pmu_wait_complete(&req); } else __pmu_backlight_update_status(pmac_backlight); } spin_unlock_irqrestore(&pmu_backlight_lock, flags); } #endif /* CONFIG_PM */ void __init pmu_backlight_init() { struct backlight_properties props; struct backlight_device *bd; char name[10]; int level, autosave; /* Special case for the old PowerBook since I can't test on it */ autosave = of_machine_is_compatible("AAPL,3400/2400") || of_machine_is_compatible("AAPL,3500"); if (!autosave && !pmac_has_backlight_type("pmu") && !of_machine_is_compatible("AAPL,PowerBook1998") && !of_machine_is_compatible("PowerBook1,1")) return; snprintf(name, sizeof(name), "pmubl"); memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data, &props); if (IS_ERR(bd)) { printk(KERN_ERR "PMU Backlight registration failed\n"); return; } uses_pmu_bl = 1; pmu_backlight_init_curve(0x7F, 0x46, 0x0E); level = bd->props.max_brightness; if (autosave) { /* read autosaved value if available */ struct adb_request req; 
pmu_request(&req, NULL, 2, 0xd9, 0); pmu_wait_complete(&req); level = pmu_backlight_curve_lookup( (req.reply[0] >> 4) * bd->props.max_brightness / 15); } bd->props.brightness = level; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); printk(KERN_INFO "PMU Backlight initialized (%s)\n", name); }
gpl-2.0
kirananto/RaZorLettuce
drivers/net/ethernet/cirrus/ep93xx_eth.c
2032
20980
/* * EP93xx ethernet network device driver * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * Dedicated to Marija Kulikova. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/mii.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <mach/hardware.h> #define DRV_MODULE_NAME "ep93xx-eth" #define DRV_MODULE_VERSION "0.1" #define RX_QUEUE_ENTRIES 64 #define TX_QUEUE_ENTRIES 8 #define MAX_PKT_SIZE 2044 #define PKT_BUF_SIZE 2048 #define REG_RXCTL 0x0000 #define REG_RXCTL_DEFAULT 0x00073800 #define REG_TXCTL 0x0004 #define REG_TXCTL_ENABLE 0x00000001 #define REG_MIICMD 0x0010 #define REG_MIICMD_READ 0x00008000 #define REG_MIICMD_WRITE 0x00004000 #define REG_MIIDATA 0x0014 #define REG_MIISTS 0x0018 #define REG_MIISTS_BUSY 0x00000001 #define REG_SELFCTL 0x0020 #define REG_SELFCTL_RESET 0x00000001 #define REG_INTEN 0x0024 #define REG_INTEN_TX 0x00000008 #define REG_INTEN_RX 0x00000007 #define REG_INTSTSP 0x0028 #define REG_INTSTS_TX 0x00000008 #define REG_INTSTS_RX 0x00000004 #define REG_INTSTSC 0x002c #define REG_AFP 0x004c #define REG_INDAD0 0x0050 #define REG_INDAD1 0x0051 #define REG_INDAD2 0x0052 #define REG_INDAD3 0x0053 #define REG_INDAD4 0x0054 #define REG_INDAD5 0x0055 #define REG_GIINTMSK 0x0064 #define REG_GIINTMSK_ENABLE 0x00008000 #define REG_BMCTL 0x0080 #define REG_BMCTL_ENABLE_TX 0x00000100 #define REG_BMCTL_ENABLE_RX 0x00000001 #define REG_BMSTS 0x0084 #define 
REG_BMSTS_RX_ACTIVE 0x00000008 #define REG_RXDQBADD 0x0090 #define REG_RXDQBLEN 0x0094 #define REG_RXDCURADD 0x0098 #define REG_RXDENQ 0x009c #define REG_RXSTSQBADD 0x00a0 #define REG_RXSTSQBLEN 0x00a4 #define REG_RXSTSQCURADD 0x00a8 #define REG_RXSTSENQ 0x00ac #define REG_TXDQBADD 0x00b0 #define REG_TXDQBLEN 0x00b4 #define REG_TXDQCURADD 0x00b8 #define REG_TXDENQ 0x00bc #define REG_TXSTSQBADD 0x00c0 #define REG_TXSTSQBLEN 0x00c4 #define REG_TXSTSQCURADD 0x00c8 #define REG_MAXFRMLEN 0x00e8 struct ep93xx_rdesc { u32 buf_addr; u32 rdesc1; }; #define RDESC1_NSOF 0x80000000 #define RDESC1_BUFFER_INDEX 0x7fff0000 #define RDESC1_BUFFER_LENGTH 0x0000ffff struct ep93xx_rstat { u32 rstat0; u32 rstat1; }; #define RSTAT0_RFP 0x80000000 #define RSTAT0_RWE 0x40000000 #define RSTAT0_EOF 0x20000000 #define RSTAT0_EOB 0x10000000 #define RSTAT0_AM 0x00c00000 #define RSTAT0_RX_ERR 0x00200000 #define RSTAT0_OE 0x00100000 #define RSTAT0_FE 0x00080000 #define RSTAT0_RUNT 0x00040000 #define RSTAT0_EDATA 0x00020000 #define RSTAT0_CRCE 0x00010000 #define RSTAT0_CRCI 0x00008000 #define RSTAT0_HTI 0x00003f00 #define RSTAT1_RFP 0x80000000 #define RSTAT1_BUFFER_INDEX 0x7fff0000 #define RSTAT1_FRAME_LENGTH 0x0000ffff struct ep93xx_tdesc { u32 buf_addr; u32 tdesc1; }; #define TDESC1_EOF 0x80000000 #define TDESC1_BUFFER_INDEX 0x7fff0000 #define TDESC1_BUFFER_ABORT 0x00008000 #define TDESC1_BUFFER_LENGTH 0x00000fff struct ep93xx_tstat { u32 tstat0; }; #define TSTAT0_TXFP 0x80000000 #define TSTAT0_TXWE 0x40000000 #define TSTAT0_FA 0x20000000 #define TSTAT0_LCRS 0x10000000 #define TSTAT0_OW 0x04000000 #define TSTAT0_TXU 0x02000000 #define TSTAT0_ECOLL 0x01000000 #define TSTAT0_NCOLL 0x001f0000 #define TSTAT0_BUFFER_INDEX 0x00007fff struct ep93xx_descs { struct ep93xx_rdesc rdesc[RX_QUEUE_ENTRIES]; struct ep93xx_tdesc tdesc[TX_QUEUE_ENTRIES]; struct ep93xx_rstat rstat[RX_QUEUE_ENTRIES]; struct ep93xx_tstat tstat[TX_QUEUE_ENTRIES]; }; struct ep93xx_priv { struct resource *res; void __iomem 
*base_addr; int irq; struct ep93xx_descs *descs; dma_addr_t descs_dma_addr; void *rx_buf[RX_QUEUE_ENTRIES]; void *tx_buf[TX_QUEUE_ENTRIES]; spinlock_t rx_lock; unsigned int rx_pointer; unsigned int tx_clean_pointer; unsigned int tx_pointer; spinlock_t tx_pending_lock; unsigned int tx_pending; struct net_device *dev; struct napi_struct napi; struct mii_if_info mii; u8 mdc_divisor; }; #define rdb(ep, off) __raw_readb((ep)->base_addr + (off)) #define rdw(ep, off) __raw_readw((ep)->base_addr + (off)) #define rdl(ep, off) __raw_readl((ep)->base_addr + (off)) #define wrb(ep, off, val) __raw_writeb((val), (ep)->base_addr + (off)) #define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off)) #define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off)) static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg) { struct ep93xx_priv *ep = netdev_priv(dev); int data; int i; wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); for (i = 0; i < 10; i++) { if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) break; msleep(1); } if (i == 10) { pr_info("mdio read timed out\n"); data = 0xffff; } else { data = rdl(ep, REG_MIIDATA); } return data; } static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data) { struct ep93xx_priv *ep = netdev_priv(dev); int i; wrl(ep, REG_MIIDATA, data); wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); for (i = 0; i < 10; i++) { if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) break; msleep(1); } if (i == 10) pr_info("mdio write timed out\n"); } static int ep93xx_rx(struct net_device *dev, int processed, int budget) { struct ep93xx_priv *ep = netdev_priv(dev); while (processed < budget) { int entry; struct ep93xx_rstat *rstat; u32 rstat0; u32 rstat1; int length; struct sk_buff *skb; entry = ep->rx_pointer; rstat = ep->descs->rstat + entry; rstat0 = rstat->rstat0; rstat1 = rstat->rstat1; if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) break; rstat->rstat0 = 0; rstat->rstat1 
= 0; if (!(rstat0 & RSTAT0_EOF)) pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1); if (!(rstat0 & RSTAT0_EOB)) pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1); if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry) pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1); if (!(rstat0 & RSTAT0_RWE)) { dev->stats.rx_errors++; if (rstat0 & RSTAT0_OE) dev->stats.rx_fifo_errors++; if (rstat0 & RSTAT0_FE) dev->stats.rx_frame_errors++; if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA)) dev->stats.rx_length_errors++; if (rstat0 & RSTAT0_CRCE) dev->stats.rx_crc_errors++; goto err; } length = rstat1 & RSTAT1_FRAME_LENGTH; if (length > MAX_PKT_SIZE) { pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1); goto err; } /* Strip FCS. */ if (rstat0 & RSTAT0_CRCI) length -= 4; skb = netdev_alloc_skb(dev, length + 2); if (likely(skb != NULL)) { struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry]; skb_reserve(skb, 2); dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr, length, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); dma_sync_single_for_device(dev->dev.parent, rxd->buf_addr, length, DMA_FROM_DEVICE); skb_put(skb, length); skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += length; } else { dev->stats.rx_dropped++; } err: ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); processed++; } return processed; } static int ep93xx_have_more_rx(struct ep93xx_priv *ep) { struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); } static int ep93xx_poll(struct napi_struct *napi, int budget) { struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); struct net_device *dev = ep->dev; int rx = 0; poll_some_more: rx = ep93xx_rx(dev, rx, budget); if (rx < budget) { int more = 0; spin_lock_irq(&ep->rx_lock); __napi_complete(napi); wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); if 
(ep93xx_have_more_rx(ep)) { wrl(ep, REG_INTEN, REG_INTEN_TX); wrl(ep, REG_INTSTSP, REG_INTSTS_RX); more = 1; } spin_unlock_irq(&ep->rx_lock); if (more && napi_reschedule(napi)) goto poll_some_more; } if (rx) { wrw(ep, REG_RXDENQ, rx); wrw(ep, REG_RXSTSENQ, rx); } return rx; } static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); struct ep93xx_tdesc *txd; int entry; if (unlikely(skb->len > MAX_PKT_SIZE)) { dev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } entry = ep->tx_pointer; ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); txd = &ep->descs->tdesc[entry]; txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff); dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len, DMA_TO_DEVICE); skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); spin_lock_irq(&ep->tx_pending_lock); ep->tx_pending++; if (ep->tx_pending == TX_QUEUE_ENTRIES) netif_stop_queue(dev); spin_unlock_irq(&ep->tx_pending_lock); wrl(ep, REG_TXDENQ, 1); return NETDEV_TX_OK; } static void ep93xx_tx_complete(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); int wake; wake = 0; spin_lock(&ep->tx_pending_lock); while (1) { int entry; struct ep93xx_tstat *tstat; u32 tstat0; entry = ep->tx_clean_pointer; tstat = ep->descs->tstat + entry; tstat0 = tstat->tstat0; if (!(tstat0 & TSTAT0_TXFP)) break; tstat->tstat0 = 0; if (tstat0 & TSTAT0_FA) pr_crit("frame aborted %.8x\n", tstat0); if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry) pr_crit("entry mismatch %.8x\n", tstat0); if (tstat0 & TSTAT0_TXWE) { int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; dev->stats.tx_packets++; dev->stats.tx_bytes += length; } else { dev->stats.tx_errors++; } if (tstat0 & TSTAT0_OW) dev->stats.tx_window_errors++; if (tstat0 & TSTAT0_TXU) dev->stats.tx_fifo_errors++; dev->stats.collisions += (tstat0 >> 16) & 0x1f; 
ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1); if (ep->tx_pending == TX_QUEUE_ENTRIES) wake = 1; ep->tx_pending--; } spin_unlock(&ep->tx_pending_lock); if (wake) netif_wake_queue(dev); } static irqreturn_t ep93xx_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct ep93xx_priv *ep = netdev_priv(dev); u32 status; status = rdl(ep, REG_INTSTSC); if (status == 0) return IRQ_NONE; if (status & REG_INTSTS_RX) { spin_lock(&ep->rx_lock); if (likely(napi_schedule_prep(&ep->napi))) { wrl(ep, REG_INTEN, REG_INTEN_TX); __napi_schedule(&ep->napi); } spin_unlock(&ep->rx_lock); } if (status & REG_INTSTS_TX) ep93xx_tx_complete(dev); return IRQ_HANDLED; } static void ep93xx_free_buffers(struct ep93xx_priv *ep) { struct device *dev = ep->dev->dev.parent; int i; for (i = 0; i < RX_QUEUE_ENTRIES; i++) { dma_addr_t d; d = ep->descs->rdesc[i].buf_addr; if (d) dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE); if (ep->rx_buf[i] != NULL) kfree(ep->rx_buf[i]); } for (i = 0; i < TX_QUEUE_ENTRIES; i++) { dma_addr_t d; d = ep->descs->tdesc[i].buf_addr; if (d) dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE); if (ep->tx_buf[i] != NULL) kfree(ep->tx_buf[i]); } dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, ep->descs_dma_addr); } static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) { struct device *dev = ep->dev->dev.parent; int i; ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs), &ep->descs_dma_addr, GFP_KERNEL); if (ep->descs == NULL) return 1; for (i = 0; i < RX_QUEUE_ENTRIES; i++) { void *buf; dma_addr_t d; buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL); if (buf == NULL) goto err; d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dev, d)) { kfree(buf); goto err; } ep->rx_buf[i] = buf; ep->descs->rdesc[i].buf_addr = d; ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; } for (i = 0; i < TX_QUEUE_ENTRIES; i++) { void *buf; dma_addr_t d; buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL); if (buf == 
NULL) goto err; d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, d)) { kfree(buf); goto err; } ep->tx_buf[i] = buf; ep->descs->tdesc[i].buf_addr = d; } return 0; err: ep93xx_free_buffers(ep); return 1; } static int ep93xx_start_hw(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); unsigned long addr; int i; wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); for (i = 0; i < 10; i++) { if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) break; msleep(1); } if (i == 10) { pr_crit("hw failed to reset\n"); return 1; } wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9)); /* Does the PHY support preamble suppress? */ if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0) wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8)); /* Receive descriptor ring. */ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc); wrl(ep, REG_RXDQBADD, addr); wrl(ep, REG_RXDCURADD, addr); wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc)); /* Receive status ring. */ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat); wrl(ep, REG_RXSTSQBADD, addr); wrl(ep, REG_RXSTSQCURADD, addr); wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat)); /* Transmit descriptor ring. */ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc); wrl(ep, REG_TXDQBADD, addr); wrl(ep, REG_TXDQCURADD, addr); wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc)); /* Transmit status ring. 
*/ addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat); wrl(ep, REG_TXSTSQBADD, addr); wrl(ep, REG_TXSTSQCURADD, addr); wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat)); wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX); wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); wrl(ep, REG_GIINTMSK, 0); for (i = 0; i < 10; i++) { if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0) break; msleep(1); } if (i == 10) { pr_crit("hw failed to start\n"); return 1; } wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES); wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES); wrb(ep, REG_INDAD0, dev->dev_addr[0]); wrb(ep, REG_INDAD1, dev->dev_addr[1]); wrb(ep, REG_INDAD2, dev->dev_addr[2]); wrb(ep, REG_INDAD3, dev->dev_addr[3]); wrb(ep, REG_INDAD4, dev->dev_addr[4]); wrb(ep, REG_INDAD5, dev->dev_addr[5]); wrl(ep, REG_AFP, 0); wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE); wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT); wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE); return 0; } static void ep93xx_stop_hw(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); int i; wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); for (i = 0; i < 10; i++) { if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) break; msleep(1); } if (i == 10) pr_crit("hw failed to reset\n"); } static int ep93xx_open(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); int err; if (ep93xx_alloc_buffers(ep)) return -ENOMEM; napi_enable(&ep->napi); if (ep93xx_start_hw(dev)) { napi_disable(&ep->napi); ep93xx_free_buffers(ep); return -EIO; } spin_lock_init(&ep->rx_lock); ep->rx_pointer = 0; ep->tx_clean_pointer = 0; ep->tx_pointer = 0; spin_lock_init(&ep->tx_pending_lock); ep->tx_pending = 0; err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); if (err) { napi_disable(&ep->napi); ep93xx_stop_hw(dev); ep93xx_free_buffers(ep); return err; } wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE); netif_start_queue(dev); return 0; } static int ep93xx_close(struct net_device *dev) { 
struct ep93xx_priv *ep = netdev_priv(dev); napi_disable(&ep->napi); netif_stop_queue(dev); wrl(ep, REG_GIINTMSK, 0); free_irq(ep->irq, dev); ep93xx_stop_hw(dev); ep93xx_free_buffers(ep); return 0; } static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ep93xx_priv *ep = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); return generic_mii_ioctl(&ep->mii, data, cmd, NULL); } static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ep93xx_priv *ep = netdev_priv(dev); return mii_ethtool_gset(&ep->mii, cmd); } static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ep93xx_priv *ep = netdev_priv(dev); return mii_ethtool_sset(&ep->mii, cmd); } static int ep93xx_nway_reset(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); return mii_nway_restart(&ep->mii); } static u32 ep93xx_get_link(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); return mii_link_ok(&ep->mii); } static const struct ethtool_ops ep93xx_ethtool_ops = { .get_drvinfo = ep93xx_get_drvinfo, .get_settings = ep93xx_get_settings, .set_settings = ep93xx_set_settings, .nway_reset = ep93xx_nway_reset, .get_link = ep93xx_get_link, }; static const struct net_device_ops ep93xx_netdev_ops = { .ndo_open = ep93xx_open, .ndo_stop = ep93xx_close, .ndo_start_xmit = ep93xx_xmit, .ndo_do_ioctl = ep93xx_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, }; static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) { struct net_device *dev; dev = alloc_etherdev(sizeof(struct ep93xx_priv)); if (dev == NULL) return NULL; memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); dev->ethtool_ops = 
&ep93xx_ethtool_ops; dev->netdev_ops = &ep93xx_netdev_ops; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; return dev; } static int ep93xx_eth_remove(struct platform_device *pdev) { struct net_device *dev; struct ep93xx_priv *ep; dev = platform_get_drvdata(pdev); if (dev == NULL) return 0; platform_set_drvdata(pdev, NULL); ep = netdev_priv(dev); /* @@@ Force down. */ unregister_netdev(dev); ep93xx_free_buffers(ep); if (ep->base_addr != NULL) iounmap(ep->base_addr); if (ep->res != NULL) { release_resource(ep->res); kfree(ep->res); } free_netdev(dev); return 0; } static int ep93xx_eth_probe(struct platform_device *pdev) { struct ep93xx_eth_data *data; struct net_device *dev; struct ep93xx_priv *ep; struct resource *mem; int irq; int err; if (pdev == NULL) return -ENODEV; data = pdev->dev.platform_data; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!mem || irq < 0) return -ENXIO; dev = ep93xx_dev_alloc(data); if (dev == NULL) { err = -ENOMEM; goto err_out; } ep = netdev_priv(dev); ep->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); platform_set_drvdata(pdev, dev); ep->res = request_mem_region(mem->start, resource_size(mem), dev_name(&pdev->dev)); if (ep->res == NULL) { dev_err(&pdev->dev, "Could not reserve memory region\n"); err = -ENOMEM; goto err_out; } ep->base_addr = ioremap(mem->start, resource_size(mem)); if (ep->base_addr == NULL) { dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); err = -EIO; goto err_out; } ep->irq = irq; ep->mii.phy_id = data->phy_id; ep->mii.phy_id_mask = 0x1f; ep->mii.reg_num_mask = 0x1f; ep->mii.dev = dev; ep->mii.mdio_read = ep93xx_mdio_read; ep->mii.mdio_write = ep93xx_mdio_write; ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. 
*/ if (is_zero_ether_addr(dev->dev_addr)) eth_hw_addr_random(dev); err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); goto err_out; } printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n", dev->name, ep->irq, dev->dev_addr); return 0; err_out: ep93xx_eth_remove(pdev); return err; } static struct platform_driver ep93xx_eth_driver = { .probe = ep93xx_eth_probe, .remove = ep93xx_eth_remove, .driver = { .name = "ep93xx-eth", .owner = THIS_MODULE, }, }; module_platform_driver(ep93xx_eth_driver); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ep93xx-eth");
gpl-2.0
Hardslog/android_kernel_asus_ze551kl
drivers/net/ethernet/nuvoton/w90p910_ether.c
2032
26208
/* * Copyright (c) 2008-2009 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gfp.h> #define DRV_MODULE_NAME "w90p910-emc" #define DRV_MODULE_VERSION "0.1" /* Ethernet MAC Registers */ #define REG_CAMCMR 0x00 #define REG_CAMEN 0x04 #define REG_CAMM_BASE 0x08 #define REG_CAML_BASE 0x0c #define REG_TXDLSA 0x88 #define REG_RXDLSA 0x8C #define REG_MCMDR 0x90 #define REG_MIID 0x94 #define REG_MIIDA 0x98 #define REG_FFTCR 0x9C #define REG_TSDR 0xa0 #define REG_RSDR 0xa4 #define REG_DMARFC 0xa8 #define REG_MIEN 0xac #define REG_MISTA 0xb0 #define REG_CTXDSA 0xcc #define REG_CTXBSA 0xd0 #define REG_CRXDSA 0xd4 #define REG_CRXBSA 0xd8 /* mac controller bit */ #define MCMDR_RXON 0x01 #define MCMDR_ACP (0x01 << 3) #define MCMDR_SPCRC (0x01 << 5) #define MCMDR_TXON (0x01 << 8) #define MCMDR_FDUP (0x01 << 18) #define MCMDR_ENMDC (0x01 << 19) #define MCMDR_OPMOD (0x01 << 20) #define SWR (0x01 << 24) /* cam command regiser */ #define CAMCMR_AUP 0x01 #define CAMCMR_AMP (0x01 << 1) #define CAMCMR_ABP (0x01 << 2) #define CAMCMR_CCAM (0x01 << 3) #define CAMCMR_ECMP (0x01 << 4) #define CAM0EN 0x01 /* mac mii controller bit */ #define MDCCR (0x0a << 20) #define PHYAD (0x01 << 8) #define PHYWR (0x01 << 16) #define PHYBUSY (0x01 << 17) #define PHYPRESP (0x01 << 18) #define CAM_ENTRY_SIZE 0x08 /* rx and tx status */ #define TXDS_TXCP (0x01 << 19) #define RXDS_CRCE (0x01 << 17) #define RXDS_PTLE (0x01 << 19) #define RXDS_RXGD (0x01 << 20) #define RXDS_ALIE (0x01 << 21) #define RXDS_RP (0x01 << 22) /* mac 
interrupt status*/ #define MISTA_EXDEF (0x01 << 19) #define MISTA_TXBERR (0x01 << 24) #define MISTA_TDU (0x01 << 23) #define MISTA_RDU (0x01 << 10) #define MISTA_RXBERR (0x01 << 11) #define ENSTART 0x01 #define ENRXINTR 0x01 #define ENRXGD (0x01 << 4) #define ENRXBERR (0x01 << 11) #define ENTXINTR (0x01 << 16) #define ENTXCP (0x01 << 18) #define ENTXABT (0x01 << 21) #define ENTXBERR (0x01 << 24) #define ENMDC (0x01 << 19) #define PHYBUSY (0x01 << 17) #define MDCCR_VAL 0xa00000 /* rx and tx owner bit */ #define RX_OWEN_DMA (0x01 << 31) #define RX_OWEN_CPU (~(0x03 << 30)) #define TX_OWEN_DMA (0x01 << 31) #define TX_OWEN_CPU (~(0x01 << 31)) /* tx frame desc controller bit */ #define MACTXINTEN 0x04 #define CRCMODE 0x02 #define PADDINGMODE 0x01 /* fftcr controller bit */ #define TXTHD (0x03 << 8) #define BLENGTH (0x01 << 20) /* global setting for driver */ #define RX_DESC_SIZE 50 #define TX_DESC_SIZE 10 #define MAX_RBUFF_SZ 0x600 #define MAX_TBUFF_SZ 0x600 #define TX_TIMEOUT (HZ/2) #define DELAY 1000 #define CAM0 0x0 static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg); struct w90p910_rxbd { unsigned int sl; unsigned int buffer; unsigned int reserved; unsigned int next; }; struct w90p910_txbd { unsigned int mode; unsigned int buffer; unsigned int sl; unsigned int next; }; struct recv_pdesc { struct w90p910_rxbd desclist[RX_DESC_SIZE]; char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ]; }; struct tran_pdesc { struct w90p910_txbd desclist[TX_DESC_SIZE]; char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ]; }; struct w90p910_ether { struct recv_pdesc *rdesc; struct tran_pdesc *tdesc; dma_addr_t rdesc_phys; dma_addr_t tdesc_phys; struct net_device_stats stats; struct platform_device *pdev; struct resource *res; struct sk_buff *skb; struct clk *clk; struct clk *rmiiclk; struct mii_if_info mii; struct timer_list check_timer; void __iomem *reg; int rxirq; int txirq; unsigned int cur_tx; unsigned int cur_rx; unsigned int finish_tx; unsigned int rx_packets; unsigned int 
rx_bytes; unsigned int start_tx_ptr; unsigned int start_rx_ptr; unsigned int linkflag; }; static void update_linkspeed_register(struct net_device *dev, unsigned int speed, unsigned int duplex) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (speed == SPEED_100) { /* 100 full/half duplex */ if (duplex == DUPLEX_FULL) { val |= (MCMDR_OPMOD | MCMDR_FDUP); } else { val |= MCMDR_OPMOD; val &= ~MCMDR_FDUP; } } else { /* 10 full/half duplex */ if (duplex == DUPLEX_FULL) { val |= MCMDR_FDUP; val &= ~MCMDR_OPMOD; } else { val &= ~(MCMDR_FDUP | MCMDR_OPMOD); } } __raw_writel(val, ether->reg + REG_MCMDR); } static void update_linkspeed(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int bmsr, bmcr, lpa, speed, duplex; pdev = ether->pdev; if (!mii_link_ok(&ether->mii)) { ether->linkflag = 0x0; netif_carrier_off(dev); dev_warn(&pdev->dev, "%s: Link down.\n", dev->name); return; } if (ether->linkflag == 1) return; bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR); bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR); if (bmcr & BMCR_ANENABLE) { if (!(bmsr & BMSR_ANEGCOMPLETE)) return; lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA); if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; else speed = SPEED_10; if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; else duplex = DUPLEX_HALF; } else { speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } update_linkspeed_register(dev, speed, duplex); dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? 
"FullDuplex" : "HalfDuplex"); ether->linkflag = 0x01; netif_carrier_on(dev); } static void w90p910_check_link(unsigned long dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct w90p910_ether *ether = netdev_priv(dev); update_linkspeed(dev); mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000)); } static void w90p910_write_cam(struct net_device *dev, unsigned int x, unsigned char *pval) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int msw, lsw; msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3]; lsw = (pval[4] << 24) | (pval[5] << 16); __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE); __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE); } static int w90p910_init_desc(struct net_device *dev) { struct w90p910_ether *ether; struct w90p910_txbd *tdesc; struct w90p910_rxbd *rdesc; struct platform_device *pdev; unsigned int i; ether = netdev_priv(dev); pdev = ether->pdev; ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), &ether->tdesc_phys, GFP_KERNEL); if (!ether->tdesc) return -ENOMEM; ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc), &ether->rdesc_phys, GFP_KERNEL); if (!ether->rdesc) { dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), ether->tdesc, ether->tdesc_phys); return -ENOMEM; } for (i = 0; i < TX_DESC_SIZE; i++) { unsigned int offset; tdesc = &(ether->tdesc->desclist[i]); if (i == TX_DESC_SIZE - 1) offset = offsetof(struct tran_pdesc, desclist[0]); else offset = offsetof(struct tran_pdesc, desclist[i + 1]); tdesc->next = ether->tdesc_phys + offset; tdesc->buffer = ether->tdesc_phys + offsetof(struct tran_pdesc, tran_buf[i]); tdesc->sl = 0; tdesc->mode = 0; } ether->start_tx_ptr = ether->tdesc_phys; for (i = 0; i < RX_DESC_SIZE; i++) { unsigned int offset; rdesc = &(ether->rdesc->desclist[i]); if (i == RX_DESC_SIZE - 1) offset = offsetof(struct recv_pdesc, desclist[0]); else offset = offsetof(struct recv_pdesc, 
desclist[i + 1]); rdesc->next = ether->rdesc_phys + offset; rdesc->sl = RX_OWEN_DMA; rdesc->buffer = ether->rdesc_phys + offsetof(struct recv_pdesc, recv_buf[i]); } ether->start_rx_ptr = ether->rdesc_phys; return 0; } static void w90p910_set_fifo_threshold(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = TXTHD | BLENGTH; __raw_writel(val, ether->reg + REG_FFTCR); } static void w90p910_return_default_idle(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); val |= SWR; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_trigger_rx(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ENSTART, ether->reg + REG_RSDR); } static void w90p910_trigger_tx(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ENSTART, ether->reg + REG_TSDR); } static void w90p910_enable_mac_interrupt(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP; val |= ENTXBERR | ENRXBERR | ENTXABT; __raw_writel(val, ether->reg + REG_MIEN); } static void w90p910_get_and_clear_int(struct net_device *dev, unsigned int *val) { struct w90p910_ether *ether = netdev_priv(dev); *val = __raw_readl(ether->reg + REG_MISTA); __raw_writel(*val, ether->reg + REG_MISTA); } static void w90p910_set_global_maccmd(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_enable_cam(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; w90p910_write_cam(dev, CAM0, dev->dev_addr); val = __raw_readl(ether->reg + REG_CAMEN); val |= CAM0EN; __raw_writel(val, ether->reg + REG_CAMEN); } static void 
w90p910_enable_cam_command(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP; __raw_writel(val, ether->reg + REG_CAMCMR); } static void w90p910_enable_tx(struct net_device *dev, unsigned int enable) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (enable) val |= MCMDR_TXON; else val &= ~MCMDR_TXON; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_enable_rx(struct net_device *dev, unsigned int enable) { struct w90p910_ether *ether = netdev_priv(dev); unsigned int val; val = __raw_readl(ether->reg + REG_MCMDR); if (enable) val |= MCMDR_RXON; else val &= ~MCMDR_RXON; __raw_writel(val, ether->reg + REG_MCMDR); } static void w90p910_set_curdest(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA); __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA); } static void w90p910_reset_mac(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); w90p910_enable_tx(dev, 0); w90p910_enable_rx(dev, 0); w90p910_set_fifo_threshold(dev); w90p910_return_default_idle(dev); if (!netif_queue_stopped(dev)) netif_stop_queue(dev); w90p910_init_desc(dev); dev->trans_start = jiffies; /* prevent tx timeout */ ether->cur_tx = 0x0; ether->finish_tx = 0x0; ether->cur_rx = 0x0; w90p910_set_curdest(dev); w90p910_enable_cam(dev); w90p910_enable_cam_command(dev); w90p910_enable_mac_interrupt(dev); w90p910_enable_tx(dev, 1); w90p910_enable_rx(dev, 1); w90p910_trigger_tx(dev); w90p910_trigger_rx(dev); dev->trans_start = jiffies; /* prevent tx timeout */ if (netif_queue_stopped(dev)) netif_wake_queue(dev); } static void w90p910_mdio_write(struct net_device *dev, int phy_id, int reg, int data) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int val, i; pdev = ether->pdev; __raw_writel(data, 
ether->reg + REG_MIID); val = (phy_id << 0x08) | reg; val |= PHYBUSY | PHYWR | MDCCR_VAL; __raw_writel(val, ether->reg + REG_MIIDA); for (i = 0; i < DELAY; i++) { if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) break; } if (i == DELAY) dev_warn(&pdev->dev, "mdio write timed out\n"); } static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; unsigned int val, i, data; pdev = ether->pdev; val = (phy_id << 0x08) | reg; val |= PHYBUSY | MDCCR_VAL; __raw_writel(val, ether->reg + REG_MIIDA); for (i = 0; i < DELAY; i++) { if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) break; } if (i == DELAY) { dev_warn(&pdev->dev, "mdio read timed out\n"); data = 0xffff; } else { data = __raw_readl(ether->reg + REG_MIID); } return data; } static int w90p910_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *address = addr; if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, address->sa_data, dev->addr_len); w90p910_write_cam(dev, CAM0, dev->dev_addr); return 0; } static int w90p910_ether_close(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; pdev = ether->pdev; dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc), ether->rdesc, ether->rdesc_phys); dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), ether->tdesc, ether->tdesc_phys); netif_stop_queue(dev); del_timer_sync(&ether->check_timer); clk_disable(ether->rmiiclk); clk_disable(ether->clk); free_irq(ether->txirq, dev); free_irq(ether->rxirq, dev); return 0; } static struct net_device_stats *w90p910_ether_stats(struct net_device *dev) { struct w90p910_ether *ether; ether = netdev_priv(dev); return &ether->stats; } static int w90p910_send_frame(struct net_device *dev, unsigned char *data, int length) { struct w90p910_ether *ether; struct w90p910_txbd *txbd; struct platform_device *pdev; unsigned char 
*buffer; ether = netdev_priv(dev); pdev = ether->pdev; txbd = &ether->tdesc->desclist[ether->cur_tx]; buffer = ether->tdesc->tran_buf[ether->cur_tx]; if (length > 1514) { dev_err(&pdev->dev, "send data %d bytes, check it\n", length); length = 1514; } txbd->sl = length & 0xFFFF; memcpy(buffer, data, length); txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN; w90p910_enable_tx(dev, 1); w90p910_trigger_tx(dev); if (++ether->cur_tx >= TX_DESC_SIZE) ether->cur_tx = 0; txbd = &ether->tdesc->desclist[ether->cur_tx]; if (txbd->mode & TX_OWEN_DMA) netif_stop_queue(dev); return 0; } static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); if (!(w90p910_send_frame(dev, skb->data, skb->len))) { ether->skb = skb; dev_kfree_skb_irq(skb); return 0; } return -EAGAIN; } static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id) { struct w90p910_ether *ether; struct w90p910_txbd *txbd; struct platform_device *pdev; struct net_device *dev; unsigned int cur_entry, entry, status; dev = dev_id; ether = netdev_priv(dev); pdev = ether->pdev; w90p910_get_and_clear_int(dev, &status); cur_entry = __raw_readl(ether->reg + REG_CTXDSA); entry = ether->tdesc_phys + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); while (entry != cur_entry) { txbd = &ether->tdesc->desclist[ether->finish_tx]; if (++ether->finish_tx >= TX_DESC_SIZE) ether->finish_tx = 0; if (txbd->sl & TXDS_TXCP) { ether->stats.tx_packets++; ether->stats.tx_bytes += txbd->sl & 0xFFFF; } else { ether->stats.tx_errors++; } txbd->sl = 0x0; txbd->mode = 0x0; if (netif_queue_stopped(dev)) netif_wake_queue(dev); entry = ether->tdesc_phys + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); } if (status & MISTA_EXDEF) { dev_err(&pdev->dev, "emc defer exceed interrupt\n"); } else if (status & MISTA_TXBERR) { dev_err(&pdev->dev, "emc bus error interrupt\n"); w90p910_reset_mac(dev); } else if (status & MISTA_TDU) { if 
(netif_queue_stopped(dev)) netif_wake_queue(dev); } return IRQ_HANDLED; } static void netdev_rx(struct net_device *dev) { struct w90p910_ether *ether; struct w90p910_rxbd *rxbd; struct platform_device *pdev; struct sk_buff *skb; unsigned char *data; unsigned int length, status, val, entry; ether = netdev_priv(dev); pdev = ether->pdev; rxbd = &ether->rdesc->desclist[ether->cur_rx]; do { val = __raw_readl(ether->reg + REG_CRXDSA); entry = ether->rdesc_phys + offsetof(struct recv_pdesc, desclist[ether->cur_rx]); if (val == entry) break; status = rxbd->sl; length = status & 0xFFFF; if (status & RXDS_RXGD) { data = ether->rdesc->recv_buf[ether->cur_rx]; skb = netdev_alloc_skb(dev, length + 2); if (!skb) { ether->stats.rx_dropped++; return; } skb_reserve(skb, 2); skb_put(skb, length); skb_copy_to_linear_data(skb, data, length); skb->protocol = eth_type_trans(skb, dev); ether->stats.rx_packets++; ether->stats.rx_bytes += length; netif_rx(skb); } else { ether->stats.rx_errors++; if (status & RXDS_RP) { dev_err(&pdev->dev, "rx runt err\n"); ether->stats.rx_length_errors++; } else if (status & RXDS_CRCE) { dev_err(&pdev->dev, "rx crc err\n"); ether->stats.rx_crc_errors++; } else if (status & RXDS_ALIE) { dev_err(&pdev->dev, "rx aligment err\n"); ether->stats.rx_frame_errors++; } else if (status & RXDS_PTLE) { dev_err(&pdev->dev, "rx longer err\n"); ether->stats.rx_over_errors++; } } rxbd->sl = RX_OWEN_DMA; rxbd->reserved = 0x0; if (++ether->cur_rx >= RX_DESC_SIZE) ether->cur_rx = 0; rxbd = &ether->rdesc->desclist[ether->cur_rx]; } while (1); } static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id) { struct net_device *dev; struct w90p910_ether *ether; struct platform_device *pdev; unsigned int status; dev = dev_id; ether = netdev_priv(dev); pdev = ether->pdev; w90p910_get_and_clear_int(dev, &status); if (status & MISTA_RDU) { netdev_rx(dev); w90p910_trigger_rx(dev); return IRQ_HANDLED; } else if (status & MISTA_RXBERR) { dev_err(&pdev->dev, "emc rx bus error\n"); 
w90p910_reset_mac(dev); } netdev_rx(dev); return IRQ_HANDLED; } static int w90p910_ether_open(struct net_device *dev) { struct w90p910_ether *ether; struct platform_device *pdev; ether = netdev_priv(dev); pdev = ether->pdev; w90p910_reset_mac(dev); w90p910_set_fifo_threshold(dev); w90p910_set_curdest(dev); w90p910_enable_cam(dev); w90p910_enable_cam_command(dev); w90p910_enable_mac_interrupt(dev); w90p910_set_global_maccmd(dev); w90p910_enable_rx(dev, 1); clk_enable(ether->rmiiclk); clk_enable(ether->clk); ether->rx_packets = 0x0; ether->rx_bytes = 0x0; if (request_irq(ether->txirq, w90p910_tx_interrupt, 0x0, pdev->name, dev)) { dev_err(&pdev->dev, "register irq tx failed\n"); return -EAGAIN; } if (request_irq(ether->rxirq, w90p910_rx_interrupt, 0x0, pdev->name, dev)) { dev_err(&pdev->dev, "register irq rx failed\n"); free_irq(ether->txirq, dev); return -EAGAIN; } mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000)); netif_start_queue(dev); w90p910_trigger_rx(dev); dev_info(&pdev->dev, "%s is OPENED\n", dev->name); return 0; } static void w90p910_ether_set_multicast_list(struct net_device *dev) { struct w90p910_ether *ether; unsigned int rx_mode; ether = netdev_priv(dev); if (dev->flags & IFF_PROMISC) rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; else rx_mode = CAMCMR_ECMP | CAMCMR_ABP; __raw_writel(rx_mode, ether->reg + REG_CAMCMR); } static int w90p910_ether_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct w90p910_ether *ether = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); return generic_mii_ioctl(&ether->mii, data, cmd, NULL); } static void w90p910_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } static int w90p910_get_settings(struct 
net_device *dev, struct ethtool_cmd *cmd) { struct w90p910_ether *ether = netdev_priv(dev); return mii_ethtool_gset(&ether->mii, cmd); } static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct w90p910_ether *ether = netdev_priv(dev); return mii_ethtool_sset(&ether->mii, cmd); } static int w90p910_nway_reset(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); return mii_nway_restart(&ether->mii); } static u32 w90p910_get_link(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); return mii_link_ok(&ether->mii); } static const struct ethtool_ops w90p910_ether_ethtool_ops = { .get_settings = w90p910_get_settings, .set_settings = w90p910_set_settings, .get_drvinfo = w90p910_get_drvinfo, .nway_reset = w90p910_nway_reset, .get_link = w90p910_get_link, }; static const struct net_device_ops w90p910_ether_netdev_ops = { .ndo_open = w90p910_ether_open, .ndo_stop = w90p910_ether_close, .ndo_start_xmit = w90p910_ether_start_xmit, .ndo_get_stats = w90p910_ether_stats, .ndo_set_rx_mode = w90p910_ether_set_multicast_list, .ndo_set_mac_address = w90p910_set_mac_address, .ndo_do_ioctl = w90p910_ether_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static void __init get_mac_address(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; char addr[6]; pdev = ether->pdev; addr[0] = 0x00; addr[1] = 0x02; addr[2] = 0xac; addr[3] = 0x55; addr[4] = 0x88; addr[5] = 0xa8; if (is_valid_ether_addr(addr)) memcpy(dev->dev_addr, &addr, 0x06); else dev_err(&pdev->dev, "invalid mac address\n"); } static int w90p910_ether_setup(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); ether_setup(dev); dev->netdev_ops = &w90p910_ether_netdev_ops; dev->ethtool_ops = &w90p910_ether_ethtool_ops; dev->tx_queue_len = 16; dev->dma = 0x0; dev->watchdog_timeo = TX_TIMEOUT; get_mac_address(dev); ether->cur_tx = 0x0; ether->cur_rx = 
0x0; ether->finish_tx = 0x0; ether->linkflag = 0x0; ether->mii.phy_id = 0x01; ether->mii.phy_id_mask = 0x1f; ether->mii.reg_num_mask = 0x1f; ether->mii.dev = dev; ether->mii.mdio_read = w90p910_mdio_read; ether->mii.mdio_write = w90p910_mdio_write; setup_timer(&ether->check_timer, w90p910_check_link, (unsigned long)dev); return 0; } static int w90p910_ether_probe(struct platform_device *pdev) { struct w90p910_ether *ether; struct net_device *dev; int error; dev = alloc_etherdev(sizeof(struct w90p910_ether)); if (!dev) return -ENOMEM; ether = netdev_priv(dev); ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (ether->res == NULL) { dev_err(&pdev->dev, "failed to get I/O memory\n"); error = -ENXIO; goto failed_free; } if (!request_mem_region(ether->res->start, resource_size(ether->res), pdev->name)) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto failed_free; } ether->reg = ioremap(ether->res->start, resource_size(ether->res)); if (ether->reg == NULL) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; goto failed_free_mem; } ether->txirq = platform_get_irq(pdev, 0); if (ether->txirq < 0) { dev_err(&pdev->dev, "failed to get ether tx irq\n"); error = -ENXIO; goto failed_free_io; } ether->rxirq = platform_get_irq(pdev, 1); if (ether->rxirq < 0) { dev_err(&pdev->dev, "failed to get ether rx irq\n"); error = -ENXIO; goto failed_free_txirq; } platform_set_drvdata(pdev, dev); ether->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(ether->clk)) { dev_err(&pdev->dev, "failed to get ether clock\n"); error = PTR_ERR(ether->clk); goto failed_free_rxirq; } ether->rmiiclk = clk_get(&pdev->dev, "RMII"); if (IS_ERR(ether->rmiiclk)) { dev_err(&pdev->dev, "failed to get ether clock\n"); error = PTR_ERR(ether->rmiiclk); goto failed_put_clk; } ether->pdev = pdev; w90p910_ether_setup(dev); error = register_netdev(dev); if (error != 0) { dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n"); error = -ENODEV; goto 
failed_put_rmiiclk; } return 0; failed_put_rmiiclk: clk_put(ether->rmiiclk); failed_put_clk: clk_put(ether->clk); failed_free_rxirq: free_irq(ether->rxirq, pdev); platform_set_drvdata(pdev, NULL); failed_free_txirq: free_irq(ether->txirq, pdev); failed_free_io: iounmap(ether->reg); failed_free_mem: release_mem_region(ether->res->start, resource_size(ether->res)); failed_free: free_netdev(dev); return error; } static int w90p910_ether_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct w90p910_ether *ether = netdev_priv(dev); unregister_netdev(dev); clk_put(ether->rmiiclk); clk_put(ether->clk); iounmap(ether->reg); release_mem_region(ether->res->start, resource_size(ether->res)); free_irq(ether->txirq, dev); free_irq(ether->rxirq, dev); del_timer_sync(&ether->check_timer); platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } static struct platform_driver w90p910_ether_driver = { .probe = w90p910_ether_probe, .remove = w90p910_ether_remove, .driver = { .name = "nuc900-emc", .owner = THIS_MODULE, }, }; module_platform_driver(w90p910_ether_driver); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("w90p910 MAC driver!"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:nuc900-emc");
gpl-2.0
dolorespark/android_kernel_hisense_m470bsa
drivers/misc/carma/carma-fpga-program.c
2800
28067
/* * CARMA Board DATA-FPGA Programmer * * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/dma-mapping.h> #include <linux/of_platform.h> #include <linux/completion.h> #include <linux/miscdevice.h> #include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/leds.h> #include <linux/slab.h> #include <linux/kref.h> #include <linux/fs.h> #include <linux/io.h> #include <media/videobuf-dma-sg.h> /* MPC8349EMDS specific get_immrbase() */ #include <sysdev/fsl_soc.h> static const char drv_name[] = "carma-fpga-program"; /* * Firmware images are always this exact size * * 12849552 bytes for a CARMA Digitizer Board (EP2S90 FPGAs) * 18662880 bytes for a CARMA Correlator Board (EP2S130 FPGAs) */ #define FW_SIZE_EP2S90 12849552 #define FW_SIZE_EP2S130 18662880 struct fpga_dev { struct miscdevice miscdev; /* Reference count */ struct kref ref; /* Device Registers */ struct device *dev; void __iomem *regs; void __iomem *immr; /* Freescale DMA Device */ struct dma_chan *chan; /* Interrupts */ int irq, status; struct completion completion; /* FPGA Bitfile */ struct mutex lock; struct videobuf_dmabuf vb; bool vb_allocated; /* max size and written bytes */ size_t fw_size; size_t bytes; }; /* * FPGA Bitfile Helpers */ /** * fpga_drop_firmware_data() - drop the bitfile image from memory * @priv: the driver's private data structure * * LOCKING: must hold priv->lock */ static void fpga_drop_firmware_data(struct fpga_dev *priv) { videobuf_dma_free(&priv->vb); priv->vb_allocated = false; priv->bytes = 0; } /* * Private Data Reference Count */ static 
void fpga_dev_remove(struct kref *ref) { struct fpga_dev *priv = container_of(ref, struct fpga_dev, ref); /* free any firmware image that was not programmed */ fpga_drop_firmware_data(priv); mutex_destroy(&priv->lock); kfree(priv); } /* * LED Trigger (could be a seperate module) */ /* * NOTE: this whole thing does have the problem that whenever the led's are * NOTE: first set to use the fpga trigger, they could be in the wrong state */ DEFINE_LED_TRIGGER(ledtrig_fpga); static void ledtrig_fpga_programmed(bool enabled) { if (enabled) led_trigger_event(ledtrig_fpga, LED_FULL); else led_trigger_event(ledtrig_fpga, LED_OFF); } /* * FPGA Register Helpers */ /* Register Definitions */ #define FPGA_CONFIG_CONTROL 0x40 #define FPGA_CONFIG_STATUS 0x44 #define FPGA_CONFIG_FIFO_SIZE 0x48 #define FPGA_CONFIG_FIFO_USED 0x4C #define FPGA_CONFIG_TOTAL_BYTE_COUNT 0x50 #define FPGA_CONFIG_CUR_BYTE_COUNT 0x54 #define FPGA_FIFO_ADDRESS 0x3000 static int fpga_fifo_size(void __iomem *regs) { return ioread32be(regs + FPGA_CONFIG_FIFO_SIZE); } #define CFG_STATUS_ERR_MASK 0xfffe static int fpga_config_error(void __iomem *regs) { return ioread32be(regs + FPGA_CONFIG_STATUS) & CFG_STATUS_ERR_MASK; } static int fpga_fifo_empty(void __iomem *regs) { return ioread32be(regs + FPGA_CONFIG_FIFO_USED) == 0; } static void fpga_fifo_write(void __iomem *regs, u32 val) { iowrite32be(val, regs + FPGA_FIFO_ADDRESS); } static void fpga_set_byte_count(void __iomem *regs, u32 count) { iowrite32be(count, regs + FPGA_CONFIG_TOTAL_BYTE_COUNT); } #define CFG_CTL_ENABLE (1 << 0) #define CFG_CTL_RESET (1 << 1) #define CFG_CTL_DMA (1 << 2) static void fpga_programmer_enable(struct fpga_dev *priv, bool dma) { u32 val; val = (dma) ? 
(CFG_CTL_ENABLE | CFG_CTL_DMA) : CFG_CTL_ENABLE; iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL); } static void fpga_programmer_disable(struct fpga_dev *priv) { iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL); } static void fpga_dump_registers(struct fpga_dev *priv) { u32 control, status, size, used, total, curr; /* good status: do nothing */ if (priv->status == 0) return; /* Dump all status registers */ control = ioread32be(priv->regs + FPGA_CONFIG_CONTROL); status = ioread32be(priv->regs + FPGA_CONFIG_STATUS); size = ioread32be(priv->regs + FPGA_CONFIG_FIFO_SIZE); used = ioread32be(priv->regs + FPGA_CONFIG_FIFO_USED); total = ioread32be(priv->regs + FPGA_CONFIG_TOTAL_BYTE_COUNT); curr = ioread32be(priv->regs + FPGA_CONFIG_CUR_BYTE_COUNT); dev_err(priv->dev, "Configuration failed, dumping status registers\n"); dev_err(priv->dev, "Control: 0x%.8x\n", control); dev_err(priv->dev, "Status: 0x%.8x\n", status); dev_err(priv->dev, "FIFO Size: 0x%.8x\n", size); dev_err(priv->dev, "FIFO Used: 0x%.8x\n", used); dev_err(priv->dev, "FIFO Total: 0x%.8x\n", total); dev_err(priv->dev, "FIFO Curr: 0x%.8x\n", curr); } /* * FPGA Power Supply Code */ #define CTL_PWR_CONTROL 0x2006 #define CTL_PWR_STATUS 0x200A #define CTL_PWR_FAIL 0x200B #define PWR_CONTROL_ENABLE 0x01 #define PWR_STATUS_ERROR_MASK 0x10 #define PWR_STATUS_GOOD 0x0f /* * Determine if the FPGA power is good for all supplies */ static bool fpga_power_good(struct fpga_dev *priv) { u8 val; val = ioread8(priv->regs + CTL_PWR_STATUS); if (val & PWR_STATUS_ERROR_MASK) return false; return val == PWR_STATUS_GOOD; } /* * Disable the FPGA power supplies */ static void fpga_disable_power_supplies(struct fpga_dev *priv) { unsigned long start; u8 val; iowrite8(0x0, priv->regs + CTL_PWR_CONTROL); /* * Wait 500ms for the power rails to discharge * * Without this delay, the CTL-CPLD state machine can get into a * state where it is waiting for the power-goods to assert, but they * never do. 
This only happens when enabling and disabling the * power sequencer very rapidly. * * The loop below will also wait for the power goods to de-assert, * but testing has shown that they are always disabled by the time * the sleep completes. However, omitting the sleep and only waiting * for the power-goods to de-assert was not sufficient to ensure * that the power sequencer would not wedge itself. */ msleep(500); start = jiffies; while (time_before(jiffies, start + HZ)) { val = ioread8(priv->regs + CTL_PWR_STATUS); if (!(val & PWR_STATUS_GOOD)) break; usleep_range(5000, 10000); } val = ioread8(priv->regs + CTL_PWR_STATUS); if (val & PWR_STATUS_GOOD) { dev_err(priv->dev, "power disable failed: " "power goods: status 0x%.2x\n", val); } if (val & PWR_STATUS_ERROR_MASK) { dev_err(priv->dev, "power disable failed: " "alarm bit set: status 0x%.2x\n", val); } } /** * fpga_enable_power_supplies() - enable the DATA-FPGA power supplies * @priv: the driver's private data structure * * Enable the DATA-FPGA power supplies, waiting up to 1 second for * them to enable successfully. * * Returns 0 on success, -ERRNO otherwise */ static int fpga_enable_power_supplies(struct fpga_dev *priv) { unsigned long start = jiffies; if (fpga_power_good(priv)) { dev_dbg(priv->dev, "power was already good\n"); return 0; } iowrite8(PWR_CONTROL_ENABLE, priv->regs + CTL_PWR_CONTROL); while (time_before(jiffies, start + HZ)) { if (fpga_power_good(priv)) return 0; usleep_range(5000, 10000); } return fpga_power_good(priv) ? 
0 : -ETIMEDOUT; } /* * Determine if the FPGA power supplies are all enabled */ static bool fpga_power_enabled(struct fpga_dev *priv) { u8 val; val = ioread8(priv->regs + CTL_PWR_CONTROL); if (val & PWR_CONTROL_ENABLE) return true; return false; } /* * Determine if the FPGA's are programmed and running correctly */ static bool fpga_running(struct fpga_dev *priv) { if (!fpga_power_good(priv)) return false; /* Check the config done bit */ return ioread32be(priv->regs + FPGA_CONFIG_STATUS) & (1 << 18); } /* * FPGA Programming Code */ /** * fpga_program_block() - put a block of data into the programmer's FIFO * @priv: the driver's private data structure * @buf: the data to program * @count: the length of data to program (must be a multiple of 4 bytes) * * Returns 0 on success, -ERRNO otherwise */ static int fpga_program_block(struct fpga_dev *priv, void *buf, size_t count) { u32 *data = buf; int size = fpga_fifo_size(priv->regs); int i, len; unsigned long timeout; /* enforce correct data length for the FIFO */ BUG_ON(count % 4 != 0); while (count > 0) { /* Get the size of the block to write (maximum is FIFO_SIZE) */ len = min_t(size_t, count, size); timeout = jiffies + HZ / 4; /* Write the block */ for (i = 0; i < len / 4; i++) fpga_fifo_write(priv->regs, data[i]); /* Update the amounts left */ count -= len; data += len / 4; /* Wait for the fifo to empty */ while (true) { if (fpga_fifo_empty(priv->regs)) { break; } else { dev_dbg(priv->dev, "Fifo not empty\n"); cpu_relax(); } if (fpga_config_error(priv->regs)) { dev_err(priv->dev, "Error detected\n"); return -EIO; } if (time_after(jiffies, timeout)) { dev_err(priv->dev, "Fifo drain timeout\n"); return -ETIMEDOUT; } usleep_range(5000, 10000); } } return 0; } /** * fpga_program_cpu() - program the DATA-FPGA's using the CPU * @priv: the driver's private data structure * * This is useful when the DMA programming method fails. 
It is possible to * wedge the Freescale DMA controller such that the DMA programming method * always fails. This method has always succeeded. * * Returns 0 on success, -ERRNO otherwise */ static noinline int fpga_program_cpu(struct fpga_dev *priv) { int ret; /* Disable the programmer */ fpga_programmer_disable(priv); /* Set the total byte count */ fpga_set_byte_count(priv->regs, priv->bytes); dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes); /* Enable the controller for programming */ fpga_programmer_enable(priv, false); dev_dbg(priv->dev, "enabled the controller\n"); /* Write each chunk of the FPGA bitfile to FPGA programmer */ ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes); if (ret) goto out_disable_controller; /* Wait for the interrupt handler to signal that programming finished */ ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); if (!ret) { dev_err(priv->dev, "Timed out waiting for completion\n"); ret = -ETIMEDOUT; goto out_disable_controller; } /* Retrieve the status from the interrupt handler */ ret = priv->status; out_disable_controller: fpga_programmer_disable(priv); return ret; } #define FIFO_DMA_ADDRESS 0xf0003000 #define FIFO_MAX_LEN 4096 /** * fpga_program_dma() - program the DATA-FPGA's using the DMA engine * @priv: the driver's private data structure * * Program the DATA-FPGA's using the Freescale DMA engine. This requires that * the engine is programmed such that the hardware DMA request lines can * control the entire DMA transaction. The system controller FPGA then * completely offloads the programming from the CPU. 
* * Returns 0 on success, -ERRNO otherwise */ static noinline int fpga_program_dma(struct fpga_dev *priv) { struct videobuf_dmabuf *vb = &priv->vb; struct dma_chan *chan = priv->chan; struct dma_async_tx_descriptor *tx; size_t num_pages, len, avail = 0; struct dma_slave_config config; struct scatterlist *sg; struct sg_table table; dma_cookie_t cookie; int ret, i; /* Disable the programmer */ fpga_programmer_disable(priv); /* Allocate a scatterlist for the DMA destination */ num_pages = DIV_ROUND_UP(priv->bytes, FIFO_MAX_LEN); ret = sg_alloc_table(&table, num_pages, GFP_KERNEL); if (ret) { dev_err(priv->dev, "Unable to allocate dst scatterlist\n"); ret = -ENOMEM; goto out_return; } /* * This is an ugly hack * * We fill in a scatterlist as if it were mapped for DMA. This is * necessary because there exists no better structure for this * inside the kernel code. * * As an added bonus, we can use the DMAEngine API for all of this, * rather than inventing another extremely similar API. */ avail = priv->bytes; for_each_sg(table.sgl, sg, num_pages, i) { len = min_t(size_t, avail, FIFO_MAX_LEN); sg_dma_address(sg) = FIFO_DMA_ADDRESS; sg_dma_len(sg) = len; avail -= len; } /* Map the buffer for DMA */ ret = videobuf_dma_map(priv->dev, &priv->vb); if (ret) { dev_err(priv->dev, "Unable to map buffer for DMA\n"); goto out_free_table; } /* * Configure the DMA channel to transfer FIFO_SIZE / 2 bytes per * transaction, and then put it under external control */ memset(&config, 0, sizeof(config)); config.direction = DMA_TO_DEVICE; config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&config); if (ret) { dev_err(priv->dev, "DMA slave configuration failed\n"); goto out_dma_unmap; } ret = chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1); if (ret) { dev_err(priv->dev, "DMA external control setup failed\n"); goto out_dma_unmap; } /* setup and 
submit the DMA transaction */ tx = chan->device->device_prep_dma_sg(chan, table.sgl, num_pages, vb->sglist, vb->sglen, 0); if (!tx) { dev_err(priv->dev, "Unable to prep DMA transaction\n"); ret = -ENOMEM; goto out_dma_unmap; } cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { dev_err(priv->dev, "Unable to submit DMA transaction\n"); ret = -ENOMEM; goto out_dma_unmap; } dma_async_memcpy_issue_pending(chan); /* Set the total byte count */ fpga_set_byte_count(priv->regs, priv->bytes); dev_dbg(priv->dev, "total byte count %u bytes\n", priv->bytes); /* Enable the controller for DMA programming */ fpga_programmer_enable(priv, true); dev_dbg(priv->dev, "enabled the controller\n"); /* Wait for the interrupt handler to signal that programming finished */ ret = wait_for_completion_timeout(&priv->completion, 2 * HZ); if (!ret) { dev_err(priv->dev, "Timed out waiting for completion\n"); ret = -ETIMEDOUT; goto out_disable_controller; } /* Retrieve the status from the interrupt handler */ ret = priv->status; out_disable_controller: fpga_programmer_disable(priv); out_dma_unmap: videobuf_dma_unmap(priv->dev, vb); out_free_table: sg_free_table(&table); out_return: return ret; } /* * Interrupt Handling */ static irqreturn_t fpga_irq(int irq, void *dev_id) { struct fpga_dev *priv = dev_id; /* Save the status */ priv->status = fpga_config_error(priv->regs) ? 
-EIO : 0; dev_dbg(priv->dev, "INTERRUPT status %d\n", priv->status); fpga_dump_registers(priv); /* Disabling the programmer clears the interrupt */ fpga_programmer_disable(priv); /* Notify any waiters */ complete(&priv->completion); return IRQ_HANDLED; } /* * SYSFS Helpers */ /** * fpga_do_stop() - deconfigure (reset) the DATA-FPGA's * @priv: the driver's private data structure * * LOCKING: must hold priv->lock */ static int fpga_do_stop(struct fpga_dev *priv) { u32 val; /* Set the led to unprogrammed */ ledtrig_fpga_programmed(false); /* Pulse the config line to reset the FPGA's */ val = CFG_CTL_ENABLE | CFG_CTL_RESET; iowrite32be(val, priv->regs + FPGA_CONFIG_CONTROL); iowrite32be(0x0, priv->regs + FPGA_CONFIG_CONTROL); return 0; } static noinline int fpga_do_program(struct fpga_dev *priv) { int ret; if (priv->bytes != priv->fw_size) { dev_err(priv->dev, "Incorrect bitfile size: got %zu bytes, " "should be %zu bytes\n", priv->bytes, priv->fw_size); return -EINVAL; } if (!fpga_power_enabled(priv)) { dev_err(priv->dev, "Power not enabled\n"); return -EINVAL; } if (!fpga_power_good(priv)) { dev_err(priv->dev, "Power not good\n"); return -EINVAL; } /* Set the LED to unprogrammed */ ledtrig_fpga_programmed(false); /* Try to program the FPGA's using DMA */ ret = fpga_program_dma(priv); /* If DMA failed or doesn't exist, try with CPU */ if (ret) { dev_warn(priv->dev, "Falling back to CPU programming\n"); ret = fpga_program_cpu(priv); } if (ret) { dev_err(priv->dev, "Unable to program FPGA's\n"); return ret; } /* Drop the firmware bitfile from memory */ fpga_drop_firmware_data(priv); dev_dbg(priv->dev, "FPGA programming successful\n"); ledtrig_fpga_programmed(true); return 0; } /* * File Operations */ static int fpga_open(struct inode *inode, struct file *filp) { /* * The miscdevice layer puts our struct miscdevice into the * filp->private_data field. We use this to find our private * data and then overwrite it with our own private structure. 
*/ struct fpga_dev *priv = container_of(filp->private_data, struct fpga_dev, miscdev); unsigned int nr_pages; int ret; /* We only allow one process at a time */ ret = mutex_lock_interruptible(&priv->lock); if (ret) return ret; filp->private_data = priv; kref_get(&priv->ref); /* Truncation: drop any existing data */ if (filp->f_flags & O_TRUNC) priv->bytes = 0; /* Check if we have already allocated a buffer */ if (priv->vb_allocated) return 0; /* Allocate a buffer to hold enough data for the bitfile */ nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE); ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages); if (ret) { dev_err(priv->dev, "unable to allocate data buffer\n"); mutex_unlock(&priv->lock); kref_put(&priv->ref, fpga_dev_remove); return ret; } priv->vb_allocated = true; return 0; } static int fpga_release(struct inode *inode, struct file *filp) { struct fpga_dev *priv = filp->private_data; mutex_unlock(&priv->lock); kref_put(&priv->ref, fpga_dev_remove); return 0; } static ssize_t fpga_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { struct fpga_dev *priv = filp->private_data; /* FPGA bitfiles have an exact size: disallow anything else */ if (priv->bytes >= priv->fw_size) return -ENOSPC; count = min_t(size_t, priv->fw_size - priv->bytes, count); if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count)) return -EFAULT; priv->bytes += count; return count; } static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { struct fpga_dev *priv = filp->private_data; count = min_t(size_t, priv->bytes - *f_pos, count); if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count)) return -EFAULT; *f_pos += count; return count; } static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin) { struct fpga_dev *priv = filp->private_data; loff_t newpos; /* only read-only opens are allowed to seek */ if ((filp->f_flags & O_ACCMODE) != O_RDONLY) return -EINVAL; switch (origin) { case SEEK_SET: 
/* seek relative to the beginning of the file */ newpos = offset; break; case SEEK_CUR: /* seek relative to current position in the file */ newpos = filp->f_pos + offset; break; case SEEK_END: /* seek relative to the end of the file */ newpos = priv->fw_size - offset; break; default: return -EINVAL; } /* check for sanity */ if (newpos > priv->fw_size) return -EINVAL; filp->f_pos = newpos; return newpos; } static const struct file_operations fpga_fops = { .open = fpga_open, .release = fpga_release, .write = fpga_write, .read = fpga_read, .llseek = fpga_llseek, }; /* * Device Attributes */ static ssize_t pfail_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fpga_dev *priv = dev_get_drvdata(dev); u8 val; val = ioread8(priv->regs + CTL_PWR_FAIL); return snprintf(buf, PAGE_SIZE, "0x%.2x\n", val); } static ssize_t pgood_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fpga_dev *priv = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_good(priv)); } static ssize_t penable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fpga_dev *priv = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%d\n", fpga_power_enabled(priv)); } static ssize_t penable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fpga_dev *priv = dev_get_drvdata(dev); unsigned long val; int ret; if (strict_strtoul(buf, 0, &val)) return -EINVAL; if (val) { ret = fpga_enable_power_supplies(priv); if (ret) return ret; } else { fpga_do_stop(priv); fpga_disable_power_supplies(priv); } return count; } static ssize_t program_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fpga_dev *priv = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%d\n", fpga_running(priv)); } static ssize_t program_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct fpga_dev *priv = dev_get_drvdata(dev); 
unsigned long val; int ret; if (strict_strtoul(buf, 0, &val)) return -EINVAL; /* We can't have an image writer and be programming simultaneously */ if (mutex_lock_interruptible(&priv->lock)) return -ERESTARTSYS; /* Program or Reset the FPGA's */ ret = val ? fpga_do_program(priv) : fpga_do_stop(priv); if (ret) goto out_unlock; /* Success */ ret = count; out_unlock: mutex_unlock(&priv->lock); return ret; } static DEVICE_ATTR(power_fail, S_IRUGO, pfail_show, NULL); static DEVICE_ATTR(power_good, S_IRUGO, pgood_show, NULL); static DEVICE_ATTR(power_enable, S_IRUGO | S_IWUSR, penable_show, penable_store); static DEVICE_ATTR(program, S_IRUGO | S_IWUSR, program_show, program_store); static struct attribute *fpga_attributes[] = { &dev_attr_power_fail.attr, &dev_attr_power_good.attr, &dev_attr_power_enable.attr, &dev_attr_program.attr, NULL, }; static const struct attribute_group fpga_attr_group = { .attrs = fpga_attributes, }; /* * OpenFirmware Device Subsystem */ #define SYS_REG_VERSION 0x00 #define SYS_REG_GEOGRAPHIC 0x10 static bool dma_filter(struct dma_chan *chan, void *data) { /* * DMA Channel #0 is the only acceptable device * * This probably won't survive an unload/load cycle of the Freescale * DMAEngine driver, but that won't be a problem */ return chan->chan_id == 0 && chan->device->dev_id == 0; } static int fpga_of_remove(struct platform_device *op) { struct fpga_dev *priv = dev_get_drvdata(&op->dev); struct device *this_device = priv->miscdev.this_device; sysfs_remove_group(&this_device->kobj, &fpga_attr_group); misc_deregister(&priv->miscdev); free_irq(priv->irq, priv); irq_dispose_mapping(priv->irq); /* make sure the power supplies are off */ fpga_disable_power_supplies(priv); /* unmap registers */ iounmap(priv->immr); iounmap(priv->regs); dma_release_channel(priv->chan); /* drop our reference to the private data structure */ kref_put(&priv->ref, fpga_dev_remove); return 0; } /* CTL-CPLD Version Register */ #define CTL_CPLD_VERSION 0x2000 static int 
fpga_of_probe(struct platform_device *op, const struct of_device_id *match) { struct device_node *of_node = op->dev.of_node; struct device *this_device; struct fpga_dev *priv; dma_cap_mask_t mask; u32 ver; int ret; /* Allocate private data */ priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(&op->dev, "Unable to allocate private data\n"); ret = -ENOMEM; goto out_return; } /* Setup the miscdevice */ priv->miscdev.minor = MISC_DYNAMIC_MINOR; priv->miscdev.name = drv_name; priv->miscdev.fops = &fpga_fops; kref_init(&priv->ref); dev_set_drvdata(&op->dev, priv); priv->dev = &op->dev; mutex_init(&priv->lock); init_completion(&priv->completion); videobuf_dma_init(&priv->vb); dev_set_drvdata(priv->dev, priv); dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); dma_cap_set(DMA_INTERRUPT, mask); dma_cap_set(DMA_SLAVE, mask); dma_cap_set(DMA_SG, mask); /* Get control of DMA channel #0 */ priv->chan = dma_request_channel(mask, dma_filter, NULL); if (!priv->chan) { dev_err(&op->dev, "Unable to acquire DMA channel #0\n"); ret = -ENODEV; goto out_free_priv; } /* Remap the registers for use */ priv->regs = of_iomap(of_node, 0); if (!priv->regs) { dev_err(&op->dev, "Unable to ioremap registers\n"); ret = -ENOMEM; goto out_dma_release_channel; } /* Remap the IMMR for use */ priv->immr = ioremap(get_immrbase(), 0x100000); if (!priv->immr) { dev_err(&op->dev, "Unable to ioremap IMMR\n"); ret = -ENOMEM; goto out_unmap_regs; } /* * Check that external DMA is configured * * U-Boot does this for us, but we should check it and bail out if * there is a problem. Failing to have this register setup correctly * will cause the DMA controller to transfer a single cacheline * worth of data, then wedge itself. 
*/ if ((ioread32be(priv->immr + 0x114) & 0xE00) != 0xE00) { dev_err(&op->dev, "External DMA control not configured\n"); ret = -ENODEV; goto out_unmap_immr; } /* * Check the CTL-CPLD version * * This driver uses the CTL-CPLD DATA-FPGA power sequencer, and we * don't want to run on any version of the CTL-CPLD that does not use * a compatible register layout. * * v2: changed register layout, added power sequencer * v3: added glitch filter on the i2c overcurrent/overtemp outputs */ ver = ioread8(priv->regs + CTL_CPLD_VERSION); if (ver != 0x02 && ver != 0x03) { dev_err(&op->dev, "CTL-CPLD is not version 0x02 or 0x03!\n"); ret = -ENODEV; goto out_unmap_immr; } /* Set the exact size that the firmware image should be */ ver = ioread32be(priv->regs + SYS_REG_VERSION); priv->fw_size = (ver & (1 << 18)) ? FW_SIZE_EP2S130 : FW_SIZE_EP2S90; /* Find the correct IRQ number */ priv->irq = irq_of_parse_and_map(of_node, 0); if (priv->irq == NO_IRQ) { dev_err(&op->dev, "Unable to find IRQ line\n"); ret = -ENODEV; goto out_unmap_immr; } /* Request the IRQ */ ret = request_irq(priv->irq, fpga_irq, IRQF_SHARED, drv_name, priv); if (ret) { dev_err(&op->dev, "Unable to request IRQ %d\n", priv->irq); ret = -ENODEV; goto out_irq_dispose_mapping; } /* Reset and stop the FPGA's, just in case */ fpga_do_stop(priv); /* Register the miscdevice */ ret = misc_register(&priv->miscdev); if (ret) { dev_err(&op->dev, "Unable to register miscdevice\n"); goto out_free_irq; } /* Create the sysfs files */ this_device = priv->miscdev.this_device; dev_set_drvdata(this_device, priv); ret = sysfs_create_group(&this_device->kobj, &fpga_attr_group); if (ret) { dev_err(&op->dev, "Unable to create sysfs files\n"); goto out_misc_deregister; } dev_info(priv->dev, "CARMA FPGA Programmer: %s rev%s with %s FPGAs\n", (ver & (1 << 17)) ? "Correlator" : "Digitizer", (ver & (1 << 16)) ? "B" : "A", (ver & (1 << 18)) ? 
"EP2S130" : "EP2S90"); return 0; out_misc_deregister: misc_deregister(&priv->miscdev); out_free_irq: free_irq(priv->irq, priv); out_irq_dispose_mapping: irq_dispose_mapping(priv->irq); out_unmap_immr: iounmap(priv->immr); out_unmap_regs: iounmap(priv->regs); out_dma_release_channel: dma_release_channel(priv->chan); out_free_priv: kref_put(&priv->ref, fpga_dev_remove); out_return: return ret; } static struct of_device_id fpga_of_match[] = { { .compatible = "carma,fpga-programmer", }, {}, }; static struct of_platform_driver fpga_of_driver = { .probe = fpga_of_probe, .remove = fpga_of_remove, .driver = { .name = drv_name, .of_match_table = fpga_of_match, .owner = THIS_MODULE, }, }; /* * Module Init / Exit */ static int __init fpga_init(void) { led_trigger_register_simple("fpga", &ledtrig_fpga); return of_register_platform_driver(&fpga_of_driver); } static void __exit fpga_exit(void) { of_unregister_platform_driver(&fpga_of_driver); led_trigger_unregister_simple(ledtrig_fpga); } MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); MODULE_DESCRIPTION("CARMA Board DATA-FPGA Programmer"); MODULE_LICENSE("GPL"); module_init(fpga_init); module_exit(fpga_exit);
gpl-2.0
nk111/htc-kernel-msm7x30
drivers/media/dvb/ttpci/budget.c
3312
23510
/* * budget.c: driver for the SAA7146 based Budget DVB cards * * Compiled from various sources by Michael Hunold <michael@mihu.de> * * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de> * * Copyright (C) 1999-2002 Ralph Metzler * & Marcus Metzler for convergence integrated media GmbH * * 26feb2004 Support for FS Activy Card (Grundig tuner) by * Michael Dreher <michael@5dot1.de>, * Oliver Endriss <o.endriss@gmx.de> and * Andreas 'randy' Weinberger * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html * * * the project's page is at http://www.linuxtv.org/ */ #include "budget.h" #include "stv0299.h" #include "ves1x93.h" #include "ves1820.h" #include "l64781.h" #include "tda8083.h" #include "s5h1420.h" #include "tda10086.h" #include "tda826x.h" #include "lnbp21.h" #include "bsru6.h" #include "bsbe1.h" #include "tdhd1.h" #include "stv6110x.h" #include "stv090x.h" #include "isl6423.h" static int diseqc_method; module_param(diseqc_method, int, 0444); MODULE_PARM_DESC(diseqc_method, "Select DiSEqC method for subsystem id 13c2:1003, 0: default, 1: more reliable (for newer revisions only)"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static void Set22K (struct budget *budget, int state) { struct saa7146_dev *dev=budget->dev; dprintk(2, "budget: %p\n", budget); saa7146_setgpio(dev, 3, (state ? SAA7146_GPIO_OUTHI : SAA7146_GPIO_OUTLO)); } /* Diseqc functions only for TT Budget card */ /* taken from the Skyvision DVB driver by Ralph Metzler <rjkm@metzlerbros.de> */ static void DiseqcSendBit (struct budget *budget, int data) { struct saa7146_dev *dev=budget->dev; dprintk(2, "budget: %p\n", budget); saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); udelay(data ? 500 : 1000); saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); udelay(data ? 
1000 : 500); } static void DiseqcSendByte (struct budget *budget, int data) { int i, par=1, d; dprintk(2, "budget: %p\n", budget); for (i=7; i>=0; i--) { d = (data>>i)&1; par ^= d; DiseqcSendBit(budget, d); } DiseqcSendBit(budget, par); } static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long burst) { struct saa7146_dev *dev=budget->dev; int i; dprintk(2, "budget: %p\n", budget); saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); mdelay(16); for (i=0; i<len; i++) DiseqcSendByte(budget, msg[i]); mdelay(16); if (burst!=-1) { if (burst) DiseqcSendByte(budget, 0xff); else { saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); mdelay(12); udelay(500); saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); } msleep(20); } return 0; } /* * Routines for the Fujitsu Siemens Activy budget card * 22 kHz tone and DiSEqC are handled by the frontend. * Voltage must be set here. * GPIO 1: LNBP EN, GPIO 2: LNBP VSEL */ static int SetVoltage_Activy (struct budget *budget, fe_sec_voltage_t voltage) { struct saa7146_dev *dev=budget->dev; dprintk(2, "budget: %p\n", budget); switch (voltage) { case SEC_VOLTAGE_13: saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTLO); break; case SEC_VOLTAGE_18: saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI); break; case SEC_VOLTAGE_OFF: saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); break; default: return -EINVAL; } return 0; } static int siemens_budget_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage) { struct budget* budget = (struct budget*) fe->dvb->priv; return SetVoltage_Activy (budget, voltage); } static int budget_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone) { struct budget* budget = (struct budget*) fe->dvb->priv; switch (tone) { case SEC_TONE_ON: Set22K (budget, 1); break; case SEC_TONE_OFF: Set22K (budget, 0); break; default: return -EINVAL; } return 0; } static int budget_diseqc_send_master_cmd(struct dvb_frontend* fe, struct 
dvb_diseqc_master_cmd* cmd) { struct budget* budget = (struct budget*) fe->dvb->priv; SendDiSEqCMsg (budget, cmd->msg_len, cmd->msg, 0); return 0; } static int budget_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t minicmd) { struct budget* budget = (struct budget*) fe->dvb->priv; SendDiSEqCMsg (budget, 0, NULL, minicmd); return 0; } static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { struct budget* budget = (struct budget*) fe->dvb->priv; u8 pwr = 0; u8 buf[4]; struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) }; u32 div = (params->frequency + 479500) / 125; if (params->frequency > 2000000) pwr = 3; else if (params->frequency > 1800000) pwr = 2; else if (params->frequency > 1600000) pwr = 1; else if (params->frequency > 1200000) pwr = 0; else if (params->frequency >= 1100000) pwr = 1; else pwr = 2; buf[0] = (div >> 8) & 0x7f; buf[1] = div & 0xff; buf[2] = ((div & 0x18000) >> 10) | 0x95; buf[3] = (pwr << 6) | 0x30; // NOTE: since we're using a prescaler of 2, we set the // divisor frequency to 62.5kHz and divide by 125 above if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static struct ves1x93_config alps_bsrv2_config = { .demod_address = 0x08, .xin = 90100000UL, .invert_pwm = 0, }; static int alps_tdbe2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { struct budget* budget = (struct budget*) fe->dvb->priv; u32 div; u8 data[4]; struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) }; div = (params->frequency + 35937500 + 31250) / 62500; data[0] = (div >> 8) & 0x7f; data[1] = div & 0xff; data[2] = 0x85 | ((div >> 10) & 0x60); data[3] = (params->frequency < 174000000 ? 0x88 : params->frequency < 470000000 ? 
0x84 : 0x81); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static struct ves1820_config alps_tdbe2_config = { .demod_address = 0x09, .xin = 57840000UL, .invert = 1, .selagc = VES1820_SELAGC_SIGNAMPERR, }; static int grundig_29504_401_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { struct budget *budget = fe->dvb->priv; u8 *tuner_addr = fe->tuner_priv; u32 div; u8 cfg, cpump, band_select; u8 data[4]; struct i2c_msg msg = { .flags = 0, .buf = data, .len = sizeof(data) }; if (tuner_addr) msg.addr = *tuner_addr; else msg.addr = 0x61; div = (36125000 + params->frequency) / 166666; cfg = 0x88; if (params->frequency < 175000000) cpump = 2; else if (params->frequency < 390000000) cpump = 1; else if (params->frequency < 470000000) cpump = 2; else if (params->frequency < 750000000) cpump = 1; else cpump = 3; if (params->frequency < 175000000) band_select = 0x0e; else if (params->frequency < 470000000) band_select = 0x05; else band_select = 0x03; data[0] = (div >> 8) & 0x7f; data[1] = div & 0xff; data[2] = ((div >> 10) & 0x60) | cfg; data[3] = (cpump << 6) | band_select; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static struct l64781_config grundig_29504_401_config = { .demod_address = 0x55, }; static struct l64781_config grundig_29504_401_config_activy = { .demod_address = 0x54, }; static u8 tuner_address_grundig_29504_401_activy = 0x60; static int grundig_29504_451_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { struct budget* budget = (struct budget*) fe->dvb->priv; u32 div; u8 data[4]; struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) }; div = params->frequency / 125; data[0] = (div >> 8) & 0x7f; data[1] = div & 0xff; data[2] = 0x8e; data[3] = 0x00; if (fe->ops.i2c_gate_ctrl) 
fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static struct tda8083_config grundig_29504_451_config = { .demod_address = 0x68, }; static int s5h1420_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters* params) { struct budget* budget = (struct budget*) fe->dvb->priv; u32 div; u8 data[4]; struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) }; div = params->frequency / 1000; data[0] = (div >> 8) & 0x7f; data[1] = div & 0xff; data[2] = 0xc2; if (div < 1450) data[3] = 0x00; else if (div < 1850) data[3] = 0x40; else if (div < 2000) data[3] = 0x80; else data[3] = 0xc0; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static struct s5h1420_config s5h1420_config = { .demod_address = 0x53, .invert = 1, .cdclk_polarity = 1, }; static struct tda10086_config tda10086_config = { .demod_address = 0x0e, .invert = 0, .diseqc_tone = 1, .xtal_freq = TDA10086_XTAL_16M, }; static struct stv0299_config alps_bsru6_config_activy = { .demod_address = 0x68, .inittab = alps_bsru6_inittab, .mclk = 88000000UL, .invert = 1, .op0_off = 1, .min_delay_ms = 100, .set_symbol_rate = alps_bsru6_set_symbol_rate, }; static struct stv0299_config alps_bsbe1_config_activy = { .demod_address = 0x68, .inittab = alps_bsbe1_inittab, .mclk = 88000000UL, .invert = 1, .op0_off = 1, .min_delay_ms = 100, .set_symbol_rate = alps_bsbe1_set_symbol_rate, }; static int alps_tdhd1_204_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char *name) { struct budget *budget = (struct budget *)fe->dvb->priv; return request_firmware(fw, name, &budget->dev->pci->dev); } static int i2c_readreg(struct i2c_adapter *i2c, u8 adr, u8 reg) { u8 val; struct i2c_msg msg[] = { { .addr = adr, .flags = 0, .buf = &reg, .len = 1 }, { .addr = adr, .flags = I2C_M_RD, .buf = &val, .len = 1 } }; return (i2c_transfer(i2c, msg, 2) != 
2) ? -EIO : val; } static u8 read_pwm(struct budget* budget) { u8 b = 0xff; u8 pwm; struct i2c_msg msg[] = { { .addr = 0x50,.flags = 0,.buf = &b,.len = 1 }, { .addr = 0x50,.flags = I2C_M_RD,.buf = &pwm,.len = 1} }; if ((i2c_transfer(&budget->i2c_adap, msg, 2) != 2) || (pwm == 0xff)) pwm = 0x48; return pwm; } static struct stv090x_config tt1600_stv090x_config = { .device = STV0903, .demod_mode = STV090x_SINGLE, .clk_mode = STV090x_CLK_EXT, .xtal = 13500000, .address = 0x68, .ts1_mode = STV090x_TSMODE_DVBCI, .ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS, .repeater_level = STV090x_RPTLEVEL_16, .tuner_init = NULL, .tuner_sleep = NULL, .tuner_set_mode = NULL, .tuner_set_frequency = NULL, .tuner_get_frequency = NULL, .tuner_set_bandwidth = NULL, .tuner_get_bandwidth = NULL, .tuner_set_bbgain = NULL, .tuner_get_bbgain = NULL, .tuner_set_refclk = NULL, .tuner_get_status = NULL, }; static struct stv6110x_config tt1600_stv6110x_config = { .addr = 0x60, .refclk = 27000000, .clk_div = 2, }; static struct isl6423_config tt1600_isl6423_config = { .current_max = SEC_CURRENT_515m, .curlim = SEC_CURRENT_LIM_ON, .mod_extern = 1, .addr = 0x08, }; static void frontend_init(struct budget *budget) { (void)alps_bsbe1_config; /* avoid warning */ switch(budget->dev->pci->subsystem_device) { case 0x1003: // Hauppauge/TT Nova budget (stv0299/ALPS BSRU6(tsa5059) OR ves1893/ALPS BSRV2(sp5659)) case 0x1013: // try the ALPS BSRV2 first of all budget->dvb_frontend = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &budget->i2c_adap); if (budget->dvb_frontend) { budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params; budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd; budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst; budget->dvb_frontend->ops.set_tone = budget_set_tone; break; } // try the ALPS BSRU6 now budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config, &budget->i2c_adap); if (budget->dvb_frontend) { 
budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params; budget->dvb_frontend->tuner_priv = &budget->i2c_adap; if (budget->dev->pci->subsystem_device == 0x1003 && diseqc_method == 0) { budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd; budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst; budget->dvb_frontend->ops.set_tone = budget_set_tone; } break; } break; case 0x1004: // Hauppauge/TT DVB-C budget (ves1820/ALPS TDBE2(sp5659)) budget->dvb_frontend = dvb_attach(ves1820_attach, &alps_tdbe2_config, &budget->i2c_adap, read_pwm(budget)); if (budget->dvb_frontend) { budget->dvb_frontend->ops.tuner_ops.set_params = alps_tdbe2_tuner_set_params; break; } break; case 0x1005: // Hauppauge/TT Nova-T budget (L64781/Grundig 29504-401(tsa5060)) budget->dvb_frontend = dvb_attach(l64781_attach, &grundig_29504_401_config, &budget->i2c_adap); if (budget->dvb_frontend) { budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params; budget->dvb_frontend->tuner_priv = NULL; break; } break; case 0x4f60: /* Fujitsu Siemens Activy Budget-S PCI rev AL (stv0299/tsa5059) */ { int subtype = i2c_readreg(&budget->i2c_adap, 0x50, 0x67); if (subtype < 0) break; /* fixme: find a better way to identify the card */ if (subtype < 0x36) { /* assume ALPS BSRU6 */ budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config_activy, &budget->i2c_adap); if (budget->dvb_frontend) { printk(KERN_INFO "budget: tuner ALPS BSRU6 detected\n"); budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params; budget->dvb_frontend->tuner_priv = &budget->i2c_adap; budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage; budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL; break; } } else { /* assume ALPS BSBE1 */ /* reset tuner */ saa7146_setgpio(budget->dev, 3, SAA7146_GPIO_OUTLO); msleep(50); saa7146_setgpio(budget->dev, 3, SAA7146_GPIO_OUTHI); msleep(250); budget->dvb_frontend 
= dvb_attach(stv0299_attach, &alps_bsbe1_config_activy, &budget->i2c_adap); if (budget->dvb_frontend) { printk(KERN_INFO "budget: tuner ALPS BSBE1 detected\n"); budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params; budget->dvb_frontend->tuner_priv = &budget->i2c_adap; budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage; budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL; break; } } break; } case 0x4f61: // Fujitsu Siemens Activy Budget-S PCI rev GR (tda8083/Grundig 29504-451(tsa5522)) budget->dvb_frontend = dvb_attach(tda8083_attach, &grundig_29504_451_config, &budget->i2c_adap); if (budget->dvb_frontend) { budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params; budget->dvb_frontend->ops.set_voltage = siemens_budget_set_voltage; budget->dvb_frontend->ops.dishnetwork_send_legacy_command = NULL; } break; case 0x5f60: /* Fujitsu Siemens Activy Budget-T PCI rev AL (tda10046/ALPS TDHD1-204A) */ budget->dvb_frontend = dvb_attach(tda10046_attach, &alps_tdhd1_204a_config, &budget->i2c_adap); if (budget->dvb_frontend) { budget->dvb_frontend->ops.tuner_ops.set_params = alps_tdhd1_204a_tuner_set_params; budget->dvb_frontend->tuner_priv = &budget->i2c_adap; } break; case 0x5f61: /* Fujitsu Siemens Activy Budget-T PCI rev GR (L64781/Grundig 29504-401(tsa5060)) */ budget->dvb_frontend = dvb_attach(l64781_attach, &grundig_29504_401_config_activy, &budget->i2c_adap); if (budget->dvb_frontend) { budget->dvb_frontend->tuner_priv = &tuner_address_grundig_29504_401_activy; budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params; } break; case 0x1016: // Hauppauge/TT Nova-S SE (samsung s5h1420/????(tda8260)) budget->dvb_frontend = dvb_attach(s5h1420_attach, &s5h1420_config, &budget->i2c_adap); if (budget->dvb_frontend) { budget->dvb_frontend->ops.tuner_ops.set_params = s5h1420_tuner_set_params; if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == 
NULL) { printk("%s: No LNBP21 found!\n", __func__); goto error_out; } break; } case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262) // gpio2 is connected to CLB - reset it + leave it high saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO); msleep(1); saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI); msleep(1); budget->dvb_frontend = dvb_attach(tda10086_attach, &tda10086_config, &budget->i2c_adap); if (budget->dvb_frontend) { if (dvb_attach(tda826x_attach, budget->dvb_frontend, 0x60, &budget->i2c_adap, 0) == NULL) printk("%s: No tda826x found!\n", __func__); if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == NULL) { printk("%s: No LNBP21 found!\n", __func__); goto error_out; } break; } case 0x101c: { /* TT S2-1600 */ struct stv6110x_devctl *ctl; saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO); msleep(50); saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI); msleep(250); budget->dvb_frontend = dvb_attach(stv090x_attach, &tt1600_stv090x_config, &budget->i2c_adap, STV090x_DEMODULATOR_0); if (budget->dvb_frontend) { ctl = dvb_attach(stv6110x_attach, budget->dvb_frontend, &tt1600_stv6110x_config, &budget->i2c_adap); if (ctl) { tt1600_stv090x_config.tuner_init = ctl->tuner_init; tt1600_stv090x_config.tuner_sleep = ctl->tuner_sleep; tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode; tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency; tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency; tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth; tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth; tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain; tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain; tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk; tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status; /* call the init function once to initialize tuner's clock output divider and demod's master clock */ if 
(budget->dvb_frontend->ops.init) budget->dvb_frontend->ops.init(budget->dvb_frontend); if (dvb_attach(isl6423_attach, budget->dvb_frontend, &budget->i2c_adap, &tt1600_isl6423_config) == NULL) { printk(KERN_ERR "%s: No Intersil ISL6423 found!\n", __func__); goto error_out; } } else { printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__); goto error_out; } } } break; } if (budget->dvb_frontend == NULL) { printk("budget: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n", budget->dev->pci->vendor, budget->dev->pci->device, budget->dev->pci->subsystem_vendor, budget->dev->pci->subsystem_device); } else { if (dvb_register_frontend(&budget->dvb_adapter, budget->dvb_frontend)) goto error_out; } return; error_out: printk("budget: Frontend registration failed!\n"); dvb_frontend_detach(budget->dvb_frontend); budget->dvb_frontend = NULL; return; } static int budget_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_data *info) { struct budget *budget = NULL; int err; budget = kmalloc(sizeof(struct budget), GFP_KERNEL); if( NULL == budget ) { return -ENOMEM; } dprintk(2, "dev:%p, info:%p, budget:%p\n", dev, info, budget); dev->ext_priv = budget; err = ttpci_budget_init(budget, dev, info, THIS_MODULE, adapter_nr); if (err) { printk("==> failed\n"); kfree (budget); return err; } budget->dvb_adapter.priv = budget; frontend_init(budget); ttpci_budget_init_hooks(budget); return 0; } static int budget_detach (struct saa7146_dev* dev) { struct budget *budget = (struct budget*) dev->ext_priv; int err; if (budget->dvb_frontend) { dvb_unregister_frontend(budget->dvb_frontend); dvb_frontend_detach(budget->dvb_frontend); } err = ttpci_budget_deinit (budget); kfree (budget); dev->ext_priv = NULL; return err; } static struct saa7146_extension budget_extension; MAKE_BUDGET_INFO(ttbs, "TT-Budget/WinTV-NOVA-S PCI", BUDGET_TT); MAKE_BUDGET_INFO(ttbc, "TT-Budget/WinTV-NOVA-C PCI", BUDGET_TT); MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T 
PCI", BUDGET_TT); MAKE_BUDGET_INFO(satel, "SATELCO Multimedia PCI", BUDGET_TT_HW_DISEQC); MAKE_BUDGET_INFO(ttbs1401, "TT-Budget-S-1401 PCI", BUDGET_TT); MAKE_BUDGET_INFO(tt1600, "TT-Budget S2-1600 PCI", BUDGET_TT); MAKE_BUDGET_INFO(fsacs0, "Fujitsu Siemens Activy Budget-S PCI (rev GR/grundig frontend)", BUDGET_FS_ACTIVY); MAKE_BUDGET_INFO(fsacs1, "Fujitsu Siemens Activy Budget-S PCI (rev AL/alps frontend)", BUDGET_FS_ACTIVY); MAKE_BUDGET_INFO(fsact, "Fujitsu Siemens Activy Budget-T PCI (rev GR/Grundig frontend)", BUDGET_FS_ACTIVY); MAKE_BUDGET_INFO(fsact1, "Fujitsu Siemens Activy Budget-T PCI (rev AL/ALPS TDHD1-204A)", BUDGET_FS_ACTIVY); static struct pci_device_id pci_tbl[] = { MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1003), MAKE_EXTENSION_PCI(ttbc, 0x13c2, 0x1004), MAKE_EXTENSION_PCI(ttbt, 0x13c2, 0x1005), MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013), MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016), MAKE_EXTENSION_PCI(ttbs1401, 0x13c2, 0x1018), MAKE_EXTENSION_PCI(tt1600, 0x13c2, 0x101c), MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60), MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61), MAKE_EXTENSION_PCI(fsact1, 0x1131, 0x5f60), MAKE_EXTENSION_PCI(fsact, 0x1131, 0x5f61), { .vendor = 0, } }; MODULE_DEVICE_TABLE(pci, pci_tbl); static struct saa7146_extension budget_extension = { .name = "budget dvb", .flags = SAA7146_USE_I2C_IRQ, .module = THIS_MODULE, .pci_tbl = pci_tbl, .attach = budget_attach, .detach = budget_detach, .irq_mask = MASK_10, .irq_func = ttpci_budget_irq10_handler, }; static int __init budget_init(void) { return saa7146_register_extension(&budget_extension); } static void __exit budget_exit(void) { saa7146_unregister_extension(&budget_extension); } module_init(budget_init); module_exit(budget_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others"); MODULE_DESCRIPTION("driver for the SAA7146 based so-called " "budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
gpl-2.0
JAV-Team-qcom/android_kernel_wingtech_msm8916
arch/metag/kernel/tcm.c
4336
3354
/* * Copyright (C) 2010 Imagination Technologies Ltd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/genalloc.h> #include <linux/string.h> #include <linux/list.h> #include <linux/slab.h> #include <asm/page.h> #include <asm/tcm.h> struct tcm_pool { struct list_head list; unsigned int tag; unsigned long start; unsigned long end; struct gen_pool *pool; }; static LIST_HEAD(pool_list); static struct tcm_pool *find_pool(unsigned int tag) { struct list_head *lh; struct tcm_pool *pool; list_for_each(lh, &pool_list) { pool = list_entry(lh, struct tcm_pool, list); if (pool->tag == tag) return pool; } return NULL; } /** * tcm_alloc - allocate memory from a TCM pool * @tag: tag of the pool to allocate memory from * @len: number of bytes to be allocated * * Allocate the requested number of bytes from the pool matching * the specified tag. Returns the address of the allocated memory * or zero on failure. */ unsigned long tcm_alloc(unsigned int tag, size_t len) { unsigned long vaddr; struct tcm_pool *pool; pool = find_pool(tag); if (!pool) return 0; vaddr = gen_pool_alloc(pool->pool, len); if (!vaddr) return 0; return vaddr; } /** * tcm_free - free a block of memory to a TCM pool * @tag: tag of the pool to free memory to * @addr: address of the memory to be freed * @len: number of bytes to be freed * * Free the requested number of bytes at a specific address to the * pool matching the specified tag. */ void tcm_free(unsigned int tag, unsigned long addr, size_t len) { struct tcm_pool *pool; pool = find_pool(tag); if (!pool) return; gen_pool_free(pool->pool, addr, len); } /** * tcm_lookup_tag - find the tag matching an address * @p: memory address to lookup the tag for * * Find the tag of the tcm memory region that contains the * specified address. Returns %TCM_INVALID_TAG if no such * memory region could be found. 
*/ unsigned int tcm_lookup_tag(unsigned long p) { struct list_head *lh; struct tcm_pool *pool; unsigned long addr = (unsigned long) p; list_for_each(lh, &pool_list) { pool = list_entry(lh, struct tcm_pool, list); if (addr >= pool->start && addr < pool->end) return pool->tag; } return TCM_INVALID_TAG; } /** * tcm_add_region - add a memory region to TCM pool list * @reg: descriptor of region to be added * * Add a region of memory to the TCM pool list. Returns 0 on success. */ int __init tcm_add_region(struct tcm_region *reg) { struct tcm_pool *pool; pool = kmalloc(sizeof(*pool), GFP_KERNEL); if (!pool) { pr_err("Failed to alloc memory for TCM pool!\n"); return -ENOMEM; } pool->tag = reg->tag; pool->start = reg->res.start; pool->end = reg->res.end; /* * 2^3 = 8 bytes granularity to allow for 64bit access alignment. * -1 = NUMA node specifier. */ pool->pool = gen_pool_create(3, -1); if (!pool->pool) { pr_err("Failed to create TCM pool!\n"); kfree(pool); return -ENOMEM; } if (gen_pool_add(pool->pool, reg->res.start, reg->res.end - reg->res.start + 1, -1)) { pr_err("Failed to add memory to TCM pool!\n"); return -ENOMEM; } pr_info("Added %s TCM pool (%08x bytes @ %08x)\n", reg->res.name, reg->res.end - reg->res.start + 1, reg->res.start); list_add_tail(&pool->list, &pool_list); return 0; }
gpl-2.0
segment-routing/openwrt
arch/metag/kernel/tcm.c
4336
3354
/* * Copyright (C) 2010 Imagination Technologies Ltd. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/genalloc.h> #include <linux/string.h> #include <linux/list.h> #include <linux/slab.h> #include <asm/page.h> #include <asm/tcm.h> struct tcm_pool { struct list_head list; unsigned int tag; unsigned long start; unsigned long end; struct gen_pool *pool; }; static LIST_HEAD(pool_list); static struct tcm_pool *find_pool(unsigned int tag) { struct list_head *lh; struct tcm_pool *pool; list_for_each(lh, &pool_list) { pool = list_entry(lh, struct tcm_pool, list); if (pool->tag == tag) return pool; } return NULL; } /** * tcm_alloc - allocate memory from a TCM pool * @tag: tag of the pool to allocate memory from * @len: number of bytes to be allocated * * Allocate the requested number of bytes from the pool matching * the specified tag. Returns the address of the allocated memory * or zero on failure. */ unsigned long tcm_alloc(unsigned int tag, size_t len) { unsigned long vaddr; struct tcm_pool *pool; pool = find_pool(tag); if (!pool) return 0; vaddr = gen_pool_alloc(pool->pool, len); if (!vaddr) return 0; return vaddr; } /** * tcm_free - free a block of memory to a TCM pool * @tag: tag of the pool to free memory to * @addr: address of the memory to be freed * @len: number of bytes to be freed * * Free the requested number of bytes at a specific address to the * pool matching the specified tag. */ void tcm_free(unsigned int tag, unsigned long addr, size_t len) { struct tcm_pool *pool; pool = find_pool(tag); if (!pool) return; gen_pool_free(pool->pool, addr, len); } /** * tcm_lookup_tag - find the tag matching an address * @p: memory address to lookup the tag for * * Find the tag of the tcm memory region that contains the * specified address. Returns %TCM_INVALID_TAG if no such * memory region could be found. 
*/ unsigned int tcm_lookup_tag(unsigned long p) { struct list_head *lh; struct tcm_pool *pool; unsigned long addr = (unsigned long) p; list_for_each(lh, &pool_list) { pool = list_entry(lh, struct tcm_pool, list); if (addr >= pool->start && addr < pool->end) return pool->tag; } return TCM_INVALID_TAG; } /** * tcm_add_region - add a memory region to TCM pool list * @reg: descriptor of region to be added * * Add a region of memory to the TCM pool list. Returns 0 on success. */ int __init tcm_add_region(struct tcm_region *reg) { struct tcm_pool *pool; pool = kmalloc(sizeof(*pool), GFP_KERNEL); if (!pool) { pr_err("Failed to alloc memory for TCM pool!\n"); return -ENOMEM; } pool->tag = reg->tag; pool->start = reg->res.start; pool->end = reg->res.end; /* * 2^3 = 8 bytes granularity to allow for 64bit access alignment. * -1 = NUMA node specifier. */ pool->pool = gen_pool_create(3, -1); if (!pool->pool) { pr_err("Failed to create TCM pool!\n"); kfree(pool); return -ENOMEM; } if (gen_pool_add(pool->pool, reg->res.start, reg->res.end - reg->res.start + 1, -1)) { pr_err("Failed to add memory to TCM pool!\n"); return -ENOMEM; } pr_info("Added %s TCM pool (%08x bytes @ %08x)\n", reg->res.name, reg->res.end - reg->res.start + 1, reg->res.start); list_add_tail(&pool->list, &pool_list); return 0; }
gpl-2.0
StelixROM/android_kernel_sony_msm8930
arch/alpha/kernel/osf_sys.c
4336
30504
/* * linux/arch/alpha/kernel/osf_sys.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles some of the stranger OSF/1 system call interfaces. * Some of the system calls expect a non-C calling standard, others have * special parameter blocks.. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/utsname.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/types.h> #include <linux/ipc.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/vfs.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/fpu.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/sysinfo.h> #include <asm/thread_info.h> #include <asm/hwrpb.h> #include <asm/processor.h> /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. We're still not quite * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) retval = -ENOMEM; return retval; } /* * This is pure guess-work.. */ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, unsigned long, text_len, unsigned long, bss_start, unsigned long, bss_len) { struct mm_struct *mm; mm = current->mm; mm->end_code = bss_start + bss_len; mm->start_brk = bss_start + bss_len; mm->brk = bss_start + bss_len; #if 0 printk("set_program_attributes(%lx %lx %lx %lx)\n", text_start, text_len, bss_start, bss_len); #endif return 0; } /* * OSF/1 directory handling functions... 
* * The "getdents()" interface is much more sane: the "basep" stuff is * braindamage (it can't really handle filesystems where the directory * offset differences aren't the same as "d_reclen"). */ #define NAME_OFFSET offsetof (struct osf_dirent, d_name) struct osf_dirent { unsigned int d_ino; unsigned short d_reclen; unsigned short d_namlen; char d_name[1]; }; struct osf_dirent_callback { struct osf_dirent __user *dirent; long __user *basep; unsigned int count; int error; }; static int osf_filldir(void *__buf, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf; unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; buf->error = -EINVAL; /* only used if we fail */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } if (buf->basep) { if (put_user(offset, buf->basep)) goto Efault; buf->basep = NULL; } dirent = buf->dirent; if (put_user(d_ino, &dirent->d_ino) || put_user(namlen, &dirent->d_namlen) || put_user(reclen, &dirent->d_reclen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; dirent = (void __user *)dirent + reclen; buf->dirent = dirent; buf->count -= reclen; return 0; Efault: buf->error = -EFAULT; return -EFAULT; } SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, struct osf_dirent __user *, dirent, unsigned int, count, long __user *, basep) { int error; struct file *file; struct osf_dirent_callback buf; error = -EBADF; file = fget(fd); if (!file) goto out; buf.dirent = dirent; buf.basep = basep; buf.count = count; buf.error = 0; error = vfs_readdir(file, osf_filldir, &buf); if (error >= 0) error = buf.error; if (count != buf.count) error = count - buf.count; fput(file); out: return error; } #undef NAME_OFFSET SYSCALL_DEFINE6(osf_mmap, unsigned 
long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif if ((off + PAGE_ALIGN(len)) < off) goto out; if (off & ~PAGE_MASK) goto out; ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } /* * The OSF/1 statfs structure is much larger, but this should * match the beginning, at least. */ struct osf_statfs { short f_type; short f_flags; int f_fsize; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; __kernel_fsid_t f_fsid; }; static int linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat, unsigned long bufsiz) { struct osf_statfs tmp_stat; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_flags = 0; /* mount flags */ tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; } SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } /* * Uhh.. 
OSF/1 mount parameters aren't exactly obvious.. * * Although to be frank, neither are the native Linux/i386 ones.. */ struct ufs_args { char __user *devname; int flags; uid_t exroot; }; struct cdfs_args { char __user *devname; int flags; uid_t exroot; /* This has lots more here, which Linux handles with the option block but I'm too lazy to do the translation into ASCII. */ }; struct procfs_args { char __user *devname; int flags; uid_t exroot; }; /* * We can't actually handle ufs yet, so we translate UFS mounts to * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS * layout is so braindead it's a major headache doing it. * * Just how long ago was it written? OTOH our UFS driver may be still * unhappy with OSF UFS. [CHECKME] */ static int osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "ext2", flags, NULL); putname(devname); out: return retval; } static int osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; char *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname, dirname, "iso9660", flags, NULL); putname(devname); out: return retval; } static int osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; return do_mount("", dirname, "proc", flags, NULL); } SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; char *name; name = getname(path); retval = PTR_ERR(name); if (IS_ERR(name)) goto out; switch (typenr) { case 1: 
retval = osf_ufs_mount(name, data, flag); break; case 6: retval = osf_cdfs_mount(name, data, flag); break; case 9: retval = osf_procfs_mount(name, data, flag); break; default: retval = -EINVAL; printk("osf_mount(%ld, %x)\n", typenr, flag); } putname(name); out: return retval; } SYSCALL_DEFINE1(osf_utsname, char __user *, name) { int error; down_read(&uts_sem); error = -EFAULT; if (copy_to_user(name + 0, utsname()->sysname, 32)) goto out; if (copy_to_user(name + 32, utsname()->nodename, 32)) goto out; if (copy_to_user(name + 64, utsname()->release, 32)) goto out; if (copy_to_user(name + 96, utsname()->version, 32)) goto out; if (copy_to_user(name + 128, utsname()->machine, 32)) goto out; error = 0; out: up_read(&uts_sem); return error; } SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } /* * For compatibility with OSF/1 only. Use utsname(2) instead. */ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { unsigned len; int i; if (!access_ok(VERIFY_WRITE, name, namelen)) return -EFAULT; len = namelen; if (len > 32) len = 32; down_read(&uts_sem); for (i = 0; i < len; ++i) { __put_user(utsname()->domainname[i], name + i); if (utsname()->domainname[i] == '\0') break; } up_read(&uts_sem); return 0; } /* * The following stuff should move into a header file should it ever * be labeled "officially supported." Right now, there is just enough * support to avoid applications (such as tar) printing error * messages. The attributes are not really implemented. 
*/ /* * Values for Property list entry flag */ #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry by default */ #define PLE_FLAG_MASK 0x1 /* Valid flag values */ #define PLE_FLAG_ALL -1 /* All flag value */ struct proplistname_args { unsigned int pl_mask; unsigned int pl_numnames; char **pl_names; }; union pl_args { struct setargs { char __user *path; long follow; long nbytes; char __user *buf; } set; struct fsetargs { long fd; long nbytes; char __user *buf; } fset; struct getargs { char __user *path; long follow; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } get; struct fgetargs { long fd; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } fget; struct delargs { char __user *path; long follow; struct proplistname_args __user *name_args; } del; struct fdelargs { long fd; struct proplistname_args __user *name_args; } fdel; }; enum pl_code { PL_SET = 1, PL_FSET = 2, PL_GET = 3, PL_FGET = 4, PL_DEL = 5, PL_FDEL = 6 }; SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; switch (code) { case PL_SET: if (get_user(error, &args->set.nbytes)) error = -EFAULT; break; case PL_FSET: if (get_user(error, &args->fset.nbytes)) error = -EFAULT; break; case PL_GET: error = get_user(min_buf_size_ptr, &args->get.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_FGET: error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_DEL: case PL_FDEL: error = 0; break; default: error = -EOPNOTSUPP; break; }; return error; } SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; unsigned long oss_os = on_sig_stack(usp); int error; if (uss) { void __user 
*ss_sp; error = -EFAULT; if (get_user(ss_sp, &uss->ss_sp)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ error = -EPERM; if (current->sas_ss_sp && on_sig_stack(usp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. */ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } if (uoss) { error = -EFAULT; if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)) || __put_user(oss_sp, &uoss->ss_sp) || __put_user(oss_os, &uoss->ss_onstack)) goto out; } error = 0; out: return error; } SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { const char *sysinfo_table[] = { utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine, "alpha", /* instruction set architecture */ "dummy", /* hardware serial number */ "dummy", /* hardware manufacturer */ "dummy", /* secure RPC domain */ }; unsigned long offset; const char *res; long len, err = -EINVAL; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); goto out; } down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; if ((unsigned long)len > (unsigned long)count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; else err = 0; up_read(&uts_sem); out: return err; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; switch (op) { case GSI_IEEE_FP_CONTROL: /* Return current software fp control & status bits. */ /* Note that DU doesn't verify available space here. 
*/ w = current_thread_info()->ieee_state & IEEE_SW_MASK; w = swcr_update_status(w, rdfpcr()); if (put_user(w, (unsigned long __user *) buffer)) return -EFAULT; return 0; case GSI_IEEE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) & UAC_BITMASK; if (put_user(w, (unsigned int __user *)buffer)) return -EFAULT; return 1; case GSI_PROC_TYPE: if (nbytes < sizeof(unsigned long)) return -EINVAL; cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); w = cpu->type; if (put_user(w, (unsigned long __user*)buffer)) return -EFAULT; return 1; case GSI_GET_HWRPB: if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; return 1; default: break; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { unsigned long swcr, fpcr; unsigned int *state; /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compiant, we must track the current IEEE * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ if (get_user(swcr, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; /* Update softare trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. 
*/ fpcr = rdfpcr() & FPCR_DYN_MASK; fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); return 0; } case SSI_IEEE_RAISE_EXCEPTION: { unsigned long exc, swcr, fpcr, fex; unsigned int *state; if (get_user(exc, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; /* Update softare trap enable bits. */ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; /* Update the real fpcr. */ fpcr = rdfpcr(); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* If any exceptions set by this call, and are unmasked, send a signal. Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { siginfo_t info; int si_code = 0; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = NULL; /* FIXME */ send_sig_info(SIGFPE, &info, current); } return 0; } case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. 
*/ break; case SSI_NVPAIRS: { unsigned long v, w, i; unsigned int old, new; for (i = 0; i < nbytes; ++i) { if (get_user(v, 2*i + (unsigned int __user *)buffer)) return -EFAULT; if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer)) return -EFAULT; switch (v) { case SSIN_UACPROC: again: old = current_thread_info()->flags; new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT); new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT; if (cmpxchg(&current_thread_info()->flags, old, new) != old) goto again; break; default: return -EOPNOTSUPP; } } return 0; } default: break; } return -EOPNOTSUPP; } /* Translations due to the fact that OSF's time_t is an int. Which affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; struct timeval32 { int tv_sec, tv_usec; }; struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; static inline long get_tv32(struct timeval *o, struct timeval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); } static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); } static inline long get_it32(struct itimerval *o, struct itimerval32 __user *i) { return (!access_ok(VERIFY_READ, i, sizeof(*i)) || (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) | __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) | __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) | __get_user(o->it_value.tv_usec, &i->it_value.tv_usec))); } static inline long put_it32(struct itimerval32 __user *o, struct itimerval *i) { return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) | __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) | __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) | __put_user(i->it_value.tv_usec, 
&o->it_value.tv_usec))); } static inline void jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) { value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timeval ktv; do_gettimeofday(&ktv); if (put_tv32(tv, &ktv)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec kts; struct timezone ktz; if (tv) { if (get_tv32((struct timeval *)&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } kts.tv_nsec *= 1000; return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it) { struct itimerval kit; int error; error = do_getitimer(which, &kit); if (!error && put_it32(it, &kit)) error = -EFAULT; return error; } SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in, struct itimerval32 __user *, out) { struct itimerval kin, kout; int error; if (in) { if (get_it32(&kin, in)) return -EFAULT; } else memset(&kin, 0, sizeof(kin)); error = do_setitimer(which, &kin, out ? &kout : NULL); if (error || !out) return error; if (put_it32(out, &kout)) return -EFAULT; return 0; } SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec tv[2]; if (tvs) { struct timeval ktvs[2]; if (get_tv32(&ktvs[0], &tvs[0]) || get_tv32(&ktvs[1], &tvs[1])) return -EFAULT; if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 || ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000) return -EINVAL; tv[0].tv_sec = ktvs[0].tv_sec; tv[0].tv_nsec = 1000 * ktvs[0].tv_usec; tv[1].tv_sec = ktvs[1].tv_sec; tv[1].tv_nsec = 1000 * ktvs[1].tv_usec; } return do_utimes(AT_FDCWD, filename, tvs ? 
tv : NULL, 0); } SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec end_time, *to = NULL; if (tvp) { time_t sec, usec; to = &end_time; if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp)) || __get_user(sec, &tvp->tv_sec) || __get_user(usec, &tvp->tv_usec)) { return -EFAULT; } if (sec < 0 || usec < 0) return -EINVAL; if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC)) return -EINVAL; } /* OSF does not copy back the remaining time. */ return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { struct timeval32 ru_utime; /* user time used */ struct timeval32 ru_stime; /* system time used */ long ru_maxrss; /* maximum resident set size */ long ru_ixrss; /* integral shared memory size */ long ru_idrss; /* integral unshared data size */ long ru_isrss; /* integral unshared stack size */ long ru_minflt; /* page reclaims */ long ru_majflt; /* page faults */ long ru_nswap; /* swaps */ long ru_inblock; /* block input operations */ long ru_oublock; /* block output operations */ long ru_msgsnd; /* messages sent */ long ru_msgrcv; /* messages received */ long ru_nsignals; /* signals received */ long ru_nvcsw; /* voluntary context switches */ long ru_nivcsw; /* involuntary " */ }; SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: jiffies_to_timeval32(current->utime, &r.ru_utime); jiffies_to_timeval32(current->stime, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: jiffies_to_timeval32(current->signal->cutime, &r.ru_utime); jiffies_to_timeval32(current->signal->cstime, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; } return copy_to_user(ru, &r, sizeof(r)) ? 
-EFAULT : 0; } SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { struct rusage r; long ret, err; unsigned int status = 0; mm_segment_t old_fs; if (!ur) return sys_wait4(pid, ustatus, options, NULL); old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_wait4(pid, (unsigned int __user *) &status, options, (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; err |= put_user(status, ustatus); err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec); err |= __put_user(r.ru_maxrss, &ur->ru_maxrss); err |= __put_user(r.ru_ixrss, &ur->ru_ixrss); err |= __put_user(r.ru_idrss, &ur->ru_idrss); err |= __put_user(r.ru_isrss, &ur->ru_isrss); err |= __put_user(r.ru_minflt, &ur->ru_minflt); err |= __put_user(r.ru_majflt, &ur->ru_majflt); err |= __put_user(r.ru_nswap, &ur->ru_nswap); err |= __put_user(r.ru_inblock, &ur->ru_inblock); err |= __put_user(r.ru_oublock, &ur->ru_oublock); err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd); err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv); err |= __put_user(r.ru_nsignals, &ur->ru_nsignals); err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw); err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw); return err ? err : ret; } /* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. 
*/ SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, struct timeval32 __user *, remain) { struct timeval tmp; unsigned long ticks; if (get_tv32(&tmp, sleep)) goto fault; ticks = timeval_to_jiffies(&tmp); ticks = schedule_timeout_interruptible(ticks); if (remain) { jiffies_to_timeval(ticks, &tmp); if (put_tv32(remain, &tmp)) goto fault; } return 0; fault: return -EFAULT; } struct timex32 { unsigned int modes; /* mode selector */ long offset; /* time offset (usec) */ long freq; /* frequency offset (scaled ppm) */ long maxerror; /* maximum error (usec) */ long esterror; /* estimated error (usec) */ int status; /* clock command/status */ long constant; /* pll time constant */ long precision; /* clock precision (usec) (read only) */ long tolerance; /* clock frequency tolerance (ppm) * (read only) */ struct timeval32 time; /* (read only) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ long jitter; /* pps jitter (us) (ro) */ int shift; /* interval duration (s) (shift) (ro) */ long stabil; /* pps stability (scaled ppm) (ro) */ long jitcnt; /* jitter limit exceeded (ro) */ long calcnt; /* calibration intervals (ro) */ long errcnt; /* calibration errors (ro) */ long stbcnt; /* stability limit exceeded (ro) */ int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct timex txc; int ret; /* copy relevant bits of struct timex. 
*/ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - offsetof(struct timex32, time))) return -EFAULT; ret = do_adjtimex(&txc); if (ret < 0) return ret; /* copy back to timex32 */ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || (put_tv32(&txc_p->time, &txc.time))) return -EFAULT; return ret; } /* Get an address range which is currently unmapped. Similar to the generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long limit) { struct vm_area_struct *vma = find_vma(current->mm, addr); while (1) { /* At this point: (!vma || addr < vma->vm_end). */ if (limit - len < addr) return -ENOMEM; if (!vma || addr + len <= vma->vm_start) return addr; addr = vma->vm_end; vma = vma->vm_next; } } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long limit; /* "32 bit" actually means 31 bit, since pointers sign extend. */ if (current->personality & ADDR_LIMIT_32BIT) limit = 0x80000000; else limit = TASK_SIZE; if (len > limit) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* First, see if the given suggestion fits. The OSF/1 loader (/sbin/loader) relies on us returning an address larger than the requested if one exists, which is a terribly broken way to program. That said, I can see the use in being able to suggest not merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. 
*/ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); return addr; } #ifdef CONFIG_OSF4_COMPAT /* Clear top 32 bits of iov_len in the user's buffer for compatibility with old versions of OSF/1 where iov_len was defined as int. */ static int osf_fix_iov_len(const struct iovec __user *iov, unsigned long count) { unsigned long i; for (i = 0 ; i < count ; i++) { int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1; if (put_user(0, iov_len_high)) return -EFAULT; } return 0; } SYSCALL_DEFINE3(osf_readv, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_readv(fd, vector, count); } SYSCALL_DEFINE3(osf_writev, unsigned long, fd, const struct iovec __user *, vector, unsigned long, count) { if (unlikely(personality(current->personality) == PER_OSF4)) if (osf_fix_iov_len(vector, count)) return -EFAULT; return sys_writev(fd, vector, count); } #endif
gpl-2.0
PyYoshi/android_kernel_kyocera_l02
arch/arm/mach-picoxcell/time.c
4848
2990
/* * Copyright (c) 2011 Picochip Ltd., Jamie Iles * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * All enquiries to support@picochip.com */ #include <linux/dw_apb_timer.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/mach/time.h> #include <asm/sched_clock.h> #include "common.h" static void timer_get_base_and_rate(struct device_node *np, void __iomem **base, u32 *rate) { *base = of_iomap(np, 0); if (!*base) panic("Unable to map regs for %s", np->name); if (of_property_read_u32(np, "clock-freq", rate)) panic("No clock-freq property for %s", np->name); } static void picoxcell_add_clockevent(struct device_node *event_timer) { void __iomem *iobase; struct dw_apb_clock_event_device *ced; u32 irq, rate; irq = irq_of_parse_and_map(event_timer, 0); if (irq == NO_IRQ) panic("No IRQ for clock event timer"); timer_get_base_and_rate(event_timer, &iobase, &rate); ced = dw_apb_clockevent_init(0, event_timer->name, 300, iobase, irq, rate); if (!ced) panic("Unable to initialise clockevent device"); dw_apb_clockevent_register(ced); } static void picoxcell_add_clocksource(struct device_node *source_timer) { void __iomem *iobase; struct dw_apb_clocksource *cs; u32 rate; timer_get_base_and_rate(source_timer, &iobase, &rate); cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate); if (!cs) panic("Unable to initialise clocksource device"); dw_apb_clocksource_start(cs); dw_apb_clocksource_register(cs); } static void __iomem *sched_io_base; static u32 picoxcell_read_sched_clock(void) { return __raw_readl(sched_io_base); } static const struct of_device_id picoxcell_rtc_ids[] __initconst = { { .compatible = "picochip,pc3x2-rtc" }, { /* Sentinel */ }, }; static void picoxcell_init_sched_clock(void) { struct device_node *sched_timer; u32 rate; sched_timer = of_find_matching_node(NULL, 
picoxcell_rtc_ids); if (!sched_timer) panic("No RTC for sched clock to use"); timer_get_base_and_rate(sched_timer, &sched_io_base, &rate); of_node_put(sched_timer); setup_sched_clock(picoxcell_read_sched_clock, 32, rate); } static const struct of_device_id picoxcell_timer_ids[] __initconst = { { .compatible = "picochip,pc3x2-timer" }, {}, }; static void __init picoxcell_timer_init(void) { struct device_node *event_timer, *source_timer; event_timer = of_find_matching_node(NULL, picoxcell_timer_ids); if (!event_timer) panic("No timer for clockevent"); picoxcell_add_clockevent(event_timer); source_timer = of_find_matching_node(event_timer, picoxcell_timer_ids); if (!source_timer) panic("No timer for clocksource"); picoxcell_add_clocksource(source_timer); of_node_put(source_timer); picoxcell_init_sched_clock(); } struct sys_timer picoxcell_timer = { .init = picoxcell_timer_init, };
gpl-2.0
46890580/rk3188-kernel-3.0
drivers/media/video/ivtv/ivtv-i2c.c
8176
21585
/* I2C functions Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This file includes an i2c implementation that was reverse engineered from the Hauppauge windows driver. Older ivtv versions used i2c-algo-bit, which whilst fine under most circumstances, had trouble with the Zilog CPU on the PVR-150 which handles IR functions (occasional inability to communicate with the chip until it was reset) and also with the i2c bus being completely unreachable when multiple PVR cards were present. The implementation is very similar to i2c-algo-bit, but there are enough subtle differences that the two are hard to merge. The general strategy employed by i2c-algo-bit is to use udelay() to implement the timing when putting out bits on the scl/sda lines. The general strategy taken here is to poll the lines for state changes (see ivtv_waitscl and ivtv_waitsda). In addition there are small delays at various locations which poll the SCL line 5 times (ivtv_scldelay). I would guess that since this is memory mapped I/O that the length of those delays is tied to the PCI bus clock. There is some extra code to do with recovery and retries. 
Since it is not known what causes the actual i2c problems in the first place, the only goal if one was to attempt to use i2c-algo-bit would be to try to make it follow the same code path. This would be a lot of work, and I'm also not convinced that it would provide a generic benefit to i2c-algo-bit. Therefore consider this an engineering solution -- not pretty, but it works. Some more general comments about what we are doing: The i2c bus is a 2 wire serial bus, with clock (SCL) and data (SDA) lines. To communicate on the bus (as a master, we don't act as a slave), we first initiate a start condition (ivtv_start). We then write the address of the device that we want to communicate with, along with a flag that indicates whether this is a read or a write. The slave then issues an ACK signal (ivtv_ack), which tells us that it is ready for reading / writing. We then proceed with reading or writing (ivtv_read/ivtv_write), and finally issue a stop condition (ivtv_stop) to make the bus available to other masters. There is an additional form of transaction where a write may be immediately followed by a read. In this case, there is no intervening stop condition. (Only the msp3400 chip uses this method of data transfer). */ #include "ivtv-driver.h" #include "ivtv-cards.h" #include "ivtv-gpio.h" #include "ivtv-i2c.h" #include <media/cx25840.h> /* i2c implementation for cx23415/6 chip, ivtv project. 
* Author: Kevin Thayer (nufan_wfk at yahoo.com) */ /* i2c stuff */ #define IVTV_REG_I2C_SETSCL_OFFSET 0x7000 #define IVTV_REG_I2C_SETSDA_OFFSET 0x7004 #define IVTV_REG_I2C_GETSCL_OFFSET 0x7008 #define IVTV_REG_I2C_GETSDA_OFFSET 0x700c #define IVTV_CS53L32A_I2C_ADDR 0x11 #define IVTV_M52790_I2C_ADDR 0x48 #define IVTV_CX25840_I2C_ADDR 0x44 #define IVTV_SAA7115_I2C_ADDR 0x21 #define IVTV_SAA7127_I2C_ADDR 0x44 #define IVTV_SAA717x_I2C_ADDR 0x21 #define IVTV_MSP3400_I2C_ADDR 0x40 #define IVTV_HAUPPAUGE_I2C_ADDR 0x50 #define IVTV_WM8739_I2C_ADDR 0x1a #define IVTV_WM8775_I2C_ADDR 0x1b #define IVTV_TEA5767_I2C_ADDR 0x60 #define IVTV_UPD64031A_I2C_ADDR 0x12 #define IVTV_UPD64083_I2C_ADDR 0x5c #define IVTV_VP27SMPX_I2C_ADDR 0x5b #define IVTV_M52790_I2C_ADDR 0x48 #define IVTV_AVERMEDIA_IR_RX_I2C_ADDR 0x40 #define IVTV_HAUP_EXT_IR_RX_I2C_ADDR 0x1a #define IVTV_HAUP_INT_IR_RX_I2C_ADDR 0x18 #define IVTV_Z8F0811_IR_TX_I2C_ADDR 0x70 #define IVTV_Z8F0811_IR_RX_I2C_ADDR 0x71 #define IVTV_ADAPTEC_IR_ADDR 0x6b /* This array should match the IVTV_HW_ defines */ static const u8 hw_addrs[] = { IVTV_CX25840_I2C_ADDR, IVTV_SAA7115_I2C_ADDR, IVTV_SAA7127_I2C_ADDR, IVTV_MSP3400_I2C_ADDR, 0, IVTV_WM8775_I2C_ADDR, IVTV_CS53L32A_I2C_ADDR, 0, IVTV_SAA7115_I2C_ADDR, IVTV_UPD64031A_I2C_ADDR, IVTV_UPD64083_I2C_ADDR, IVTV_SAA717x_I2C_ADDR, IVTV_WM8739_I2C_ADDR, IVTV_VP27SMPX_I2C_ADDR, IVTV_M52790_I2C_ADDR, 0, /* IVTV_HW_GPIO dummy driver ID */ IVTV_AVERMEDIA_IR_RX_I2C_ADDR, /* IVTV_HW_I2C_IR_RX_AVER */ IVTV_HAUP_EXT_IR_RX_I2C_ADDR, /* IVTV_HW_I2C_IR_RX_HAUP_EXT */ IVTV_HAUP_INT_IR_RX_I2C_ADDR, /* IVTV_HW_I2C_IR_RX_HAUP_INT */ IVTV_Z8F0811_IR_TX_I2C_ADDR, /* IVTV_HW_Z8F0811_IR_TX_HAUP */ IVTV_Z8F0811_IR_RX_I2C_ADDR, /* IVTV_HW_Z8F0811_IR_RX_HAUP */ IVTV_ADAPTEC_IR_ADDR, /* IVTV_HW_I2C_IR_RX_ADAPTEC */ }; /* This array should match the IVTV_HW_ defines */ static const char * const hw_devicenames[] = { "cx25840", "saa7115", "saa7127_auto", /* saa7127 or saa7129 */ "msp3400", "tuner", "wm8775", 
"cs53l32a", "tveeprom", "saa7114", "upd64031a", "upd64083", "saa717x", "wm8739", "vp27smpx", "m52790", "gpio", "ir_video", /* IVTV_HW_I2C_IR_RX_AVER */ "ir_video", /* IVTV_HW_I2C_IR_RX_HAUP_EXT */ "ir_video", /* IVTV_HW_I2C_IR_RX_HAUP_INT */ "ir_tx_z8f0811_haup", /* IVTV_HW_Z8F0811_IR_TX_HAUP */ "ir_rx_z8f0811_haup", /* IVTV_HW_Z8F0811_IR_RX_HAUP */ "ir_video", /* IVTV_HW_I2C_IR_RX_ADAPTEC */ }; static int get_key_adaptec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw) { unsigned char keybuf[4]; keybuf[0] = 0x00; i2c_master_send(ir->c, keybuf, 1); /* poll IR chip */ if (i2c_master_recv(ir->c, keybuf, sizeof(keybuf)) != sizeof(keybuf)) { return 0; } /* key pressed ? */ if (keybuf[2] == 0xff) return 0; /* remove repeat bit */ keybuf[2] &= 0x7f; keybuf[3] |= 0x80; *ir_key = keybuf[3] | keybuf[2] << 8 | keybuf[1] << 16 |keybuf[0] << 24; *ir_raw = *ir_key; return 1; } static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr) { struct i2c_board_info info; struct i2c_adapter *adap = &itv->i2c_adap; struct IR_i2c_init_data *init_data = &itv->ir_i2c_init_data; unsigned short addr_list[2] = { addr, I2C_CLIENT_END }; /* Only allow one IR transmitter to be registered per board */ if (hw & IVTV_HW_IR_TX_ANY) { if (itv->hw_flags & IVTV_HW_IR_TX_ANY) return -1; memset(&info, 0, sizeof(struct i2c_board_info)); strlcpy(info.type, type, I2C_NAME_SIZE); return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ? 
-1 : 0; } /* Only allow one IR receiver to be registered per board */ if (itv->hw_flags & IVTV_HW_IR_RX_ANY) return -1; /* Our default information for ir-kbd-i2c.c to use */ switch (hw) { case IVTV_HW_I2C_IR_RX_AVER: init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS; init_data->internal_get_key_func = IR_KBD_GET_KEY_AVERMEDIA_CARDBUS; init_data->type = RC_TYPE_OTHER; init_data->name = "AVerMedia AVerTV card"; break; case IVTV_HW_I2C_IR_RX_HAUP_EXT: case IVTV_HW_I2C_IR_RX_HAUP_INT: init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP; init_data->type = RC_TYPE_RC5; init_data->name = itv->card_name; break; case IVTV_HW_Z8F0811_IR_RX_HAUP: /* Default to grey remote */ init_data->ir_codes = RC_MAP_HAUPPAUGE; init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR; init_data->type = RC_TYPE_RC5; init_data->name = itv->card_name; break; case IVTV_HW_I2C_IR_RX_ADAPTEC: init_data->get_key = get_key_adaptec; init_data->name = itv->card_name; /* FIXME: The protocol and RC_MAP needs to be corrected */ init_data->ir_codes = RC_MAP_EMPTY; init_data->type = RC_TYPE_UNKNOWN; break; } memset(&info, 0, sizeof(struct i2c_board_info)); info.platform_data = init_data; strlcpy(info.type, type, I2C_NAME_SIZE); return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ? -1 : 0; } /* Instantiate the IR receiver device using probing -- undesirable */ struct i2c_client *ivtv_i2c_new_ir_legacy(struct ivtv *itv) { struct i2c_board_info info; /* * The external IR receiver is at i2c address 0x34. * The internal IR receiver is at i2c address 0x30. * * In theory, both can be fitted, and Hauppauge suggests an external * overrides an internal. That's why we probe 0x1a (~0x34) first. CB * * Some of these addresses we probe may collide with other i2c address * allocations, so this function must be called after all other i2c * devices we care about are registered. 
*/ const unsigned short addr_list[] = { 0x1a, /* Hauppauge IR external - collides with WM8739 */ 0x18, /* Hauppauge IR internal */ I2C_CLIENT_END }; memset(&info, 0, sizeof(struct i2c_board_info)); strlcpy(info.type, "ir_video", I2C_NAME_SIZE); return i2c_new_probed_device(&itv->i2c_adap, &info, addr_list, NULL); } int ivtv_i2c_register(struct ivtv *itv, unsigned idx) { struct v4l2_subdev *sd; struct i2c_adapter *adap = &itv->i2c_adap; const char *type = hw_devicenames[idx]; u32 hw = 1 << idx; if (idx >= ARRAY_SIZE(hw_addrs)) return -1; if (hw == IVTV_HW_TUNER) { /* special tuner handling */ sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, itv->card_i2c->radio); if (sd) sd->grp_id = 1 << idx; sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, itv->card_i2c->demod); if (sd) sd->grp_id = 1 << idx; sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, itv->card_i2c->tv); if (sd) sd->grp_id = 1 << idx; return sd ? 0 : -1; } if (hw & IVTV_HW_IR_ANY) return ivtv_i2c_new_ir(itv, hw, type, hw_addrs[idx]); /* Is it not an I2C device or one we do not wish to register? */ if (!hw_addrs[idx]) return -1; /* It's an I2C device other than an analog tuner or IR chip */ if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) { sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, I2C_ADDRS(hw_addrs[idx])); } else if (hw == IVTV_HW_CX25840) { struct cx25840_platform_data pdata; struct i2c_board_info cx25840_info = { .type = "cx25840", .addr = hw_addrs[idx], .platform_data = &pdata, }; pdata.pvr150_workaround = itv->pvr150_workaround; sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap, &cx25840_info, NULL); } else { sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, hw_addrs[idx], NULL); } if (sd) sd->grp_id = 1 << idx; return sd ? 
0 : -1; } struct v4l2_subdev *ivtv_find_hw(struct ivtv *itv, u32 hw) { struct v4l2_subdev *result = NULL; struct v4l2_subdev *sd; spin_lock(&itv->v4l2_dev.lock); v4l2_device_for_each_subdev(sd, &itv->v4l2_dev) { if (sd->grp_id == hw) { result = sd; break; } } spin_unlock(&itv->v4l2_dev.lock); return result; } /* Set the serial clock line to the desired state */ static void ivtv_setscl(struct ivtv *itv, int state) { /* write them out */ /* write bits are inverted */ write_reg(~state, IVTV_REG_I2C_SETSCL_OFFSET); } /* Set the serial data line to the desired state */ static void ivtv_setsda(struct ivtv *itv, int state) { /* write them out */ /* write bits are inverted */ write_reg(~state & 1, IVTV_REG_I2C_SETSDA_OFFSET); } /* Read the serial clock line */ static int ivtv_getscl(struct ivtv *itv) { return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1; } /* Read the serial data line */ static int ivtv_getsda(struct ivtv *itv) { return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1; } /* Implement a short delay by polling the serial clock line */ static void ivtv_scldelay(struct ivtv *itv) { int i; for (i = 0; i < 5; ++i) ivtv_getscl(itv); } /* Wait for the serial clock line to become set to a specific value */ static int ivtv_waitscl(struct ivtv *itv, int val) { int i; ivtv_scldelay(itv); for (i = 0; i < 1000; ++i) { if (ivtv_getscl(itv) == val) return 1; } return 0; } /* Wait for the serial data line to become set to a specific value */ static int ivtv_waitsda(struct ivtv *itv, int val) { int i; ivtv_scldelay(itv); for (i = 0; i < 1000; ++i) { if (ivtv_getsda(itv) == val) return 1; } return 0; } /* Wait for the slave to issue an ACK */ static int ivtv_ack(struct ivtv *itv) { int ret = 0; if (ivtv_getscl(itv) == 1) { IVTV_DEBUG_HI_I2C("SCL was high starting an ack\n"); ivtv_setscl(itv, 0); if (!ivtv_waitscl(itv, 0)) { IVTV_DEBUG_I2C("Could not set SCL low starting an ack\n"); return -EREMOTEIO; } } ivtv_setsda(itv, 1); ivtv_scldelay(itv); ivtv_setscl(itv, 1); if 
(!ivtv_waitsda(itv, 0)) { IVTV_DEBUG_I2C("Slave did not ack\n"); ret = -EREMOTEIO; } ivtv_setscl(itv, 0); if (!ivtv_waitscl(itv, 0)) { IVTV_DEBUG_I2C("Failed to set SCL low after ACK\n"); ret = -EREMOTEIO; } return ret; } /* Write a single byte to the i2c bus and wait for the slave to ACK */ static int ivtv_sendbyte(struct ivtv *itv, unsigned char byte) { int i, bit; IVTV_DEBUG_HI_I2C("write %x\n",byte); for (i = 0; i < 8; ++i, byte<<=1) { ivtv_setscl(itv, 0); if (!ivtv_waitscl(itv, 0)) { IVTV_DEBUG_I2C("Error setting SCL low\n"); return -EREMOTEIO; } bit = (byte>>7)&1; ivtv_setsda(itv, bit); if (!ivtv_waitsda(itv, bit)) { IVTV_DEBUG_I2C("Error setting SDA\n"); return -EREMOTEIO; } ivtv_setscl(itv, 1); if (!ivtv_waitscl(itv, 1)) { IVTV_DEBUG_I2C("Slave not ready for bit\n"); return -EREMOTEIO; } } ivtv_setscl(itv, 0); if (!ivtv_waitscl(itv, 0)) { IVTV_DEBUG_I2C("Error setting SCL low\n"); return -EREMOTEIO; } return ivtv_ack(itv); } /* Read a byte from the i2c bus and send a NACK if applicable (i.e. 
for the final byte) */ static int ivtv_readbyte(struct ivtv *itv, unsigned char *byte, int nack) { int i; *byte = 0; ivtv_setsda(itv, 1); ivtv_scldelay(itv); for (i = 0; i < 8; ++i) { ivtv_setscl(itv, 0); ivtv_scldelay(itv); ivtv_setscl(itv, 1); if (!ivtv_waitscl(itv, 1)) { IVTV_DEBUG_I2C("Error setting SCL high\n"); return -EREMOTEIO; } *byte = ((*byte)<<1)|ivtv_getsda(itv); } ivtv_setscl(itv, 0); ivtv_scldelay(itv); ivtv_setsda(itv, nack); ivtv_scldelay(itv); ivtv_setscl(itv, 1); ivtv_scldelay(itv); ivtv_setscl(itv, 0); ivtv_scldelay(itv); IVTV_DEBUG_HI_I2C("read %x\n",*byte); return 0; } /* Issue a start condition on the i2c bus to alert slaves to prepare for an address write */ static int ivtv_start(struct ivtv *itv) { int sda; sda = ivtv_getsda(itv); if (sda != 1) { IVTV_DEBUG_HI_I2C("SDA was low at start\n"); ivtv_setsda(itv, 1); if (!ivtv_waitsda(itv, 1)) { IVTV_DEBUG_I2C("SDA stuck low\n"); return -EREMOTEIO; } } if (ivtv_getscl(itv) != 1) { ivtv_setscl(itv, 1); if (!ivtv_waitscl(itv, 1)) { IVTV_DEBUG_I2C("SCL stuck low at start\n"); return -EREMOTEIO; } } ivtv_setsda(itv, 0); ivtv_scldelay(itv); return 0; } /* Issue a stop condition on the i2c bus to release it */ static int ivtv_stop(struct ivtv *itv) { int i; if (ivtv_getscl(itv) != 0) { IVTV_DEBUG_HI_I2C("SCL not low when stopping\n"); ivtv_setscl(itv, 0); if (!ivtv_waitscl(itv, 0)) { IVTV_DEBUG_I2C("SCL could not be set low\n"); } } ivtv_setsda(itv, 0); ivtv_scldelay(itv); ivtv_setscl(itv, 1); if (!ivtv_waitscl(itv, 1)) { IVTV_DEBUG_I2C("SCL could not be set high\n"); return -EREMOTEIO; } ivtv_scldelay(itv); ivtv_setsda(itv, 1); if (!ivtv_waitsda(itv, 1)) { IVTV_DEBUG_I2C("resetting I2C\n"); for (i = 0; i < 16; ++i) { ivtv_setscl(itv, 0); ivtv_scldelay(itv); ivtv_setscl(itv, 1); ivtv_scldelay(itv); ivtv_setsda(itv, 1); } ivtv_waitsda(itv, 1); return -EREMOTEIO; } return 0; } /* Write a message to the given i2c slave. 
do_stop may be 0 to prevent issuing the i2c stop condition (when following with a read) */ static int ivtv_write(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len, int do_stop) { int retry, ret = -EREMOTEIO; u32 i; for (retry = 0; ret != 0 && retry < 8; ++retry) { ret = ivtv_start(itv); if (ret == 0) { ret = ivtv_sendbyte(itv, addr<<1); for (i = 0; ret == 0 && i < len; ++i) ret = ivtv_sendbyte(itv, data[i]); } if (ret != 0 || do_stop) { ivtv_stop(itv); } } if (ret) IVTV_DEBUG_I2C("i2c write to %x failed\n", addr); return ret; } /* Read data from the given i2c slave. A stop condition is always issued. */ static int ivtv_read(struct ivtv *itv, unsigned char addr, unsigned char *data, u32 len) { int retry, ret = -EREMOTEIO; u32 i; for (retry = 0; ret != 0 && retry < 8; ++retry) { ret = ivtv_start(itv); if (ret == 0) ret = ivtv_sendbyte(itv, (addr << 1) | 1); for (i = 0; ret == 0 && i < len; ++i) { ret = ivtv_readbyte(itv, &data[i], i == len - 1); } ivtv_stop(itv); } if (ret) IVTV_DEBUG_I2C("i2c read from %x failed\n", addr); return ret; } /* Kernel i2c transfer implementation. Takes a number of messages to be read or written. If a read follows a write, this will occur without an intervening stop condition */ static int ivtv_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct v4l2_device *v4l2_dev = i2c_get_adapdata(i2c_adap); struct ivtv *itv = to_ivtv(v4l2_dev); int retval; int i; mutex_lock(&itv->i2c_bus_lock); for (i = retval = 0; retval == 0 && i < num; i++) { if (msgs[i].flags & I2C_M_RD) retval = ivtv_read(itv, msgs[i].addr, msgs[i].buf, msgs[i].len); else { /* if followed by a read, don't stop */ int stop = !(i + 1 < num && msgs[i + 1].flags == I2C_M_RD); retval = ivtv_write(itv, msgs[i].addr, msgs[i].buf, msgs[i].len, stop); } } mutex_unlock(&itv->i2c_bus_lock); return retval ? 
retval : num; } /* Kernel i2c capabilities */ static u32 ivtv_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm ivtv_algo = { .master_xfer = ivtv_xfer, .functionality = ivtv_functionality, }; /* template for our-bit banger */ static struct i2c_adapter ivtv_i2c_adap_hw_template = { .name = "ivtv i2c driver", .algo = &ivtv_algo, .algo_data = NULL, /* filled from template */ .owner = THIS_MODULE, }; static void ivtv_setscl_old(void *data, int state) { struct ivtv *itv = (struct ivtv *)data; if (state) itv->i2c_state |= 0x01; else itv->i2c_state &= ~0x01; /* write them out */ /* write bits are inverted */ write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSCL_OFFSET); } static void ivtv_setsda_old(void *data, int state) { struct ivtv *itv = (struct ivtv *)data; if (state) itv->i2c_state |= 0x01; else itv->i2c_state &= ~0x01; /* write them out */ /* write bits are inverted */ write_reg(~itv->i2c_state, IVTV_REG_I2C_SETSDA_OFFSET); } static int ivtv_getscl_old(void *data) { struct ivtv *itv = (struct ivtv *)data; return read_reg(IVTV_REG_I2C_GETSCL_OFFSET) & 1; } static int ivtv_getsda_old(void *data) { struct ivtv *itv = (struct ivtv *)data; return read_reg(IVTV_REG_I2C_GETSDA_OFFSET) & 1; } /* template for i2c-bit-algo */ static struct i2c_adapter ivtv_i2c_adap_template = { .name = "ivtv i2c driver", .algo = NULL, /* set by i2c-algo-bit */ .algo_data = NULL, /* filled from template */ .owner = THIS_MODULE, }; #define IVTV_ALGO_BIT_TIMEOUT (2) /* seconds */ static const struct i2c_algo_bit_data ivtv_i2c_algo_template = { .setsda = ivtv_setsda_old, .setscl = ivtv_setscl_old, .getsda = ivtv_getsda_old, .getscl = ivtv_getscl_old, .udelay = IVTV_DEFAULT_I2C_CLOCK_PERIOD / 2, /* microseconds */ .timeout = IVTV_ALGO_BIT_TIMEOUT * HZ, /* jiffies */ }; static struct i2c_client ivtv_i2c_client_template = { .name = "ivtv internal", }; /* init + register i2c adapter */ int init_ivtv_i2c(struct ivtv *itv) { int retval; 
IVTV_DEBUG_I2C("i2c init\n"); /* Sanity checks for the I2C hardware arrays. They must be the * same size. */ if (ARRAY_SIZE(hw_devicenames) != ARRAY_SIZE(hw_addrs)) { IVTV_ERR("Mismatched I2C hardware arrays\n"); return -ENODEV; } if (itv->options.newi2c > 0) { memcpy(&itv->i2c_adap, &ivtv_i2c_adap_hw_template, sizeof(struct i2c_adapter)); } else { memcpy(&itv->i2c_adap, &ivtv_i2c_adap_template, sizeof(struct i2c_adapter)); memcpy(&itv->i2c_algo, &ivtv_i2c_algo_template, sizeof(struct i2c_algo_bit_data)); } itv->i2c_algo.udelay = itv->options.i2c_clock_period / 2; itv->i2c_algo.data = itv; itv->i2c_adap.algo_data = &itv->i2c_algo; sprintf(itv->i2c_adap.name + strlen(itv->i2c_adap.name), " #%d", itv->instance); i2c_set_adapdata(&itv->i2c_adap, &itv->v4l2_dev); memcpy(&itv->i2c_client, &ivtv_i2c_client_template, sizeof(struct i2c_client)); itv->i2c_client.adapter = &itv->i2c_adap; itv->i2c_adap.dev.parent = &itv->pdev->dev; IVTV_DEBUG_I2C("setting scl and sda to 1\n"); ivtv_setscl(itv, 1); ivtv_setsda(itv, 1); if (itv->options.newi2c > 0) retval = i2c_add_adapter(&itv->i2c_adap); else retval = i2c_bit_add_bus(&itv->i2c_adap); return retval; } void exit_ivtv_i2c(struct ivtv *itv) { IVTV_DEBUG_I2C("i2c exit\n"); i2c_del_adapter(&itv->i2c_adap); }
gpl-2.0
poondog/joey-m7-GPE
mm/percpu-km.c
11248
2852
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are allocated as a contiguous kernel memory using gfp
 * allocation. This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined. It's
 *   not compatible with PER_CPU_KM. EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported. When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is power of two multiple of
 *   PAGE_SIZE. Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   chunk size is not aligned. percpu-km code will whine about it.
 */

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

/*
 * Populate @size bytes at @off in @chunk.  The backing pages already
 * exist (allocated up front in pcpu_create_chunk()), so "populating"
 * here is just zeroing the area in every CPU's unit.  Always succeeds.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

	return 0;
}

/*
 * Depopulation is a no-op: the chunk's memory is one contiguous block
 * that is only returned when the whole chunk is destroyed.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	/* nada */
}

/*
 * Allocate a new chunk backed by a single contiguous page block large
 * enough for all units (group 0 only - see pcpu_verify_alloc_info()).
 * Returns NULL on allocation failure.
 */
static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	/* order_base_2() rounds up, so a non-power-of-two size wastes pages */
	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	/* let pcpu_addr_to_page()/page->index map each page back to us */
	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	/* base_addr is biased so that unit 0 lands at group offset 0 */
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
	return chunk;
}

/*
 * Release the contiguous page block (if any) and the chunk descriptor.
 * Safe to call with a NULL or never-populated @chunk.
 */
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

/* Directly mapped kernel memory: a simple virt-to-page translation. */
static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

/*
 * Validate the first-chunk layout for the KM backend: exactly one
 * group is supported (NUMA layouts are rejected), and warn when the
 * power-of-two rounding in pcpu_create_chunk() will waste pages.
 * Returns 0 if usable, -EINVAL otherwise.
 */
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		printk(KERN_CRIT "percpu: can't handle more than one groups\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n",
		       alloc_pages - nr_pages);

	return 0;
}
gpl-2.0
flar2/m7-GPE
sound/pci/echoaudio/indigoiox_dsp.c
12528
2197
/************************************************************************

This file is part of Echo Digital Audio's generic driver library.
Copyright Echo Digital Audio Corporation (c) 1998 - 2005
All rights reserved
www.echoaudio.com

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
USA.

*************************************************************************

 Translation from C++ and adaptation for use in ALSA-Driver
 were made by Giuliano Pochini <pochini@shiny.it>

*************************************************************************/

/* Forward declarations for vmixer helpers defined in the shared
   echoaudio core that this card-specific file is compiled with. */
static int update_vmixer_level(struct echoaudio *chip);
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
			   int gain);

/*
 * Per-card hardware init for the Indigo IOx.
 *
 * Verifies the PCI subdevice id, sets up the DSP communication page,
 * records the device ids and loads the DSP firmware.  The board is
 * flagged "bad" for the whole window in which firmware loading can
 * fail, and only cleared once load_firmware() succeeds.
 *
 * Returns 0 on success or a negative error code.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Indigo IOx\n"));
	/* mask off the low revision nibble before comparing the model id */
	if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO_IOX))
		return -ENODEV;

	err = init_dsp_comm_page(chip);
	if (err < 0) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;
	chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
	/* Since this card has no ASIC, mark it as loaded so everything
	   works OK */
	chip->asic_loaded = TRUE;
	/* only the internal clock is available on this hardware */
	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;

	err = load_firmware(chip);
	if (err < 0)
		return err;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return err;
}

/* Reset the mixer to its power-on defaults (line levels only here). */
static int set_mixer_defaults(struct echoaudio *chip)
{
	return init_line_levels(chip);
}
gpl-2.0
jjhmod/rk3188-android-kernel
sound/core/seq/oss/seq_oss_writeq.c
13296
4196
/*
 * OSS compatible sequencer driver
 *
 * seq_oss_writeq.c - write queue and sync
 *
 * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include "seq_oss_writeq.h"
#include "seq_oss_event.h"
#include "seq_oss_timer.h"
#include <sound/seq_oss_legacy.h>
#include "../seq_lock.h"
#include "../seq_clientmgr.h"
#include <linux/wait.h>
#include <linux/slab.h>


/*
 * create a write queue record
 *
 * Allocates the queue, initializes the sync machinery (lock, flag,
 * waitqueue) and sizes the kernel client's output pool to @maxlen
 * cells with a wakeup threshold at half of it.
 * Returns NULL on allocation failure.
 */
struct seq_oss_writeq *
snd_seq_oss_writeq_new(struct seq_oss_devinfo *dp, int maxlen)
{
	struct seq_oss_writeq *q;
	struct snd_seq_client_pool pool;

	if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL)
		return NULL;
	q->dp = dp;
	q->maxlen = maxlen;
	spin_lock_init(&q->sync_lock);
	q->sync_event_put = 0;
	q->sync_time = 0;
	init_waitqueue_head(&q->sync_sleep);

	memset(&pool, 0, sizeof(pool));
	pool.client = dp->cseq;
	pool.output_pool = maxlen;
	pool.output_room = maxlen / 2;

	snd_seq_oss_control(dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);

	return q;
}

/*
 * delete the write queue
 *
 * Drains pending output and frees the record; NULL is a no-op.
 */
void
snd_seq_oss_writeq_delete(struct seq_oss_writeq *q)
{
	if (q) {
		snd_seq_oss_writeq_clear(q);	/* to be sure */
		kfree(q);
	}
}


/*
 * reset the write queue
 *
 * Removes all queued output events from the kernel client and wakes
 * any thread sleeping in snd_seq_oss_writeq_sync().
 */
void
snd_seq_oss_writeq_clear(struct seq_oss_writeq *q)
{
	struct snd_seq_remove_events reset;

	memset(&reset, 0, sizeof(reset));
	reset.remove_mode = SNDRV_SEQ_REMOVE_OUTPUT; /* remove all */
	snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_REMOVE_EVENTS, &reset);

	/* wake up sleepers if any */
	snd_seq_oss_writeq_wakeup(q, 0);
}

/*
 * wait until the write buffer has enough room
 *
 * Enqueues a self-addressed SNDRV_SEQ_EVENT_ECHO at the current tick
 * (at most one in flight, tracked by sync_event_put) and sleeps up to
 * one second for its echo, which arrives via
 * snd_seq_oss_writeq_wakeup().  Returns 0 when the sync completed
 * (or was interrupted by a signal), 1 when the caller should retry.
 */
int
snd_seq_oss_writeq_sync(struct seq_oss_writeq *q)
{
	struct seq_oss_devinfo *dp = q->dp;
	abstime_t time;

	time = snd_seq_oss_timer_cur_tick(dp->timer);
	if (q->sync_time >= time)
		return 0; /* already finished */

	if (! q->sync_event_put) {
		struct snd_seq_event ev;
		union evrec *rec;

		/* put echoback event */
		memset(&ev, 0, sizeof(ev));
		ev.flags = 0;
		ev.type = SNDRV_SEQ_EVENT_ECHO;
		ev.time.tick = time;
		/* echo back to itself */
		snd_seq_oss_fill_addr(dp, &ev, dp->addr.client, dp->addr.port);
		rec = (union evrec *)&ev.data;
		rec->t.code = SEQ_SYNCTIMER;
		rec->t.time = time;
		q->sync_event_put = 1;
		snd_seq_kernel_client_enqueue_blocking(dp->cseq, &ev, NULL, 0, 0);
	}

	wait_event_interruptible_timeout(q->sync_sleep, ! q->sync_event_put, HZ);
	if (signal_pending(current))
		/* interrupted - return 0 to finish sync */
		q->sync_event_put = 0;
	if (! q->sync_event_put || q->sync_time >= time)
		return 0;
	return 1;
}

/*
 * wake up sync - echo event was caught
 *
 * Called when the echoback event arrives: records the reached @time,
 * clears the in-flight flag and wakes the sleeper, all under the
 * sync lock so the sleeping side sees a consistent pair.
 */
void
snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time)
{
	unsigned long flags;

	spin_lock_irqsave(&q->sync_lock, flags);
	q->sync_time = time;
	q->sync_event_put = 0;
	if (waitqueue_active(&q->sync_sleep)) {
		wake_up(&q->sync_sleep);
	}
	spin_unlock_irqrestore(&q->sync_lock, flags);
}


/*
 * return the unused pool size
 */
int
snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q)
{
	struct snd_seq_client_pool pool;
	pool.client = q->dp->cseq;
	snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);
	return pool.output_free;
}


/*
 * set output threshold size from ioctl
 *
 * Read-modify-write of the client pool so only output_room changes.
 */
void
snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val)
{
	struct snd_seq_client_pool pool;
	pool.client = q->dp->cseq;
	snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);
	pool.output_room = val;
	snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);
}
gpl-2.0
chenleicpp/p970_kernel_3.0.8
arch/alpha/kernel/core_polaris.c
13808
4523
/* * linux/arch/alpha/kernel/core_polaris.c * * POLARIS chip-specific code */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_polaris.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address. This is fairly straightforward * on POLARIS, since the chip itself generates Type 0 or Type 1 * cycles automatically depending on the bus number (Bus 0 is * hardwired to Type 0, all others are Type 1. Peer bridges * are not supported). * * All types: * * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., scsi and ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, u8 *type1) { u8 bus = pbus->number; *type1 = (bus == 0) ? 
0 : 1; *pci_addr = (bus << 16) | (device_fn << 8) | (where) | POLARIS_DENSE_CONFIG_BASE; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," " returning address 0x%p\n" bus, device_fn, where, *pci_addr)); return 0; } static int polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops polaris_pci_ops = { .read = polaris_read_config, .write = polaris_write_config, }; void __init polaris_init_arch(void) { struct pci_controller *hose; /* May need to initialize error reporting (see PCICTL0/1), but * for now assume that the firmware has done the right thing * already. */ #if 0 printk("polaris_init_arch(): trusting firmware for setup\n"); #endif /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = 0; hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR; hose->sparse_io_base = 0; hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR; hose->sg_isa = hose->sg_pci = NULL; /* The I/O window is fixed at 2G @ 2G. 
*/ __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; } static inline void polaris_pci_clr_err(void) { *(vusp)POLARIS_W_STATUS; /* Write 1's to settable bits to clear errors */ *(vusp)POLARIS_W_STATUS = 0x7800; mb(); *(vusp)POLARIS_W_STATUS; } void polaris_machine_check(unsigned long vector, unsigned long la_ptr) { /* Clear the error before any reporting. */ mb(); mb(); draina(); polaris_pci_clr_err(); wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "POLARIS", mcheck_expected(0)); }
gpl-2.0
sytuxww/android2.3-dm8168
arch/mips/kernel/cpu-bugs64.c
241
7610
/*
 * Copyright (C) 2003, 2004, 2007  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>

#include <asm/bugs.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/system.h>

/* Panic message fragments used when a known R4000/R4400 erratum is
   detected but the corresponding workaround is not compiled in. */
static char bug64hit[] __initdata =
	"reliable operation impossible!\n%s";
static char nowar[] __initdata =
	"Please report to <linux-mips@linux-mips.org>.";
static char r4kwar[] __initdata =
	"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
	"Enable CPU_DADDI_WORKAROUNDS to rectify.";

/*
 * Emit `mod' NOPs after aligning to `align' bytes, so the code that
 * follows starts at a controlled offset within an I-cache line.  Both
 * arguments must be immediates (assembler directives can't take
 * registers), hence the callers can't loop over them.
 */
static inline void align_mod(const int align, const int mod)
{
	asm volatile(
		".set push\n\t"
		".set noreorder\n\t"
		".balign %0\n\t"
		".rept %1\n\t"
		"nop\n\t"
		".endr\n\t"
		".set pop"
		:
		: GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
}

/*
 * Run the erratum-sensitive mult/dsll32 sequence at a specific code
 * alignment (align/mod) and return the three observed values through
 * *v1/*v2/*w so the caller can classify bug vs. workaround behaviour.
 */
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
				     const int align, const int mod)
{
	unsigned long flags;
	int m1, m2;
	long p, s, lv1, lv2, lw;

	/*
	 * We want the multiply and the shift to be isolated from the
	 * rest of the code to disable gcc optimizations.  Hence the
	 * asm statements that execute nothing, but make gcc not know
	 * what the values of m1, m2 and s are and what lv2 and p are
	 * used for.
	 */

	local_irq_save(flags);
	/*
	 * The following code leads to a wrong result of the first
	 * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
	 * 00000422 or 00000430, respectively).
	 *
	 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
	 * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
	 * details.  I got no permission to duplicate them here,
	 * sigh... --macro
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (5), "1" (8), "2" (5));
	align_mod(align, mod);
	/*
	 * The trailing nop is needed to fulfill the two-instruction
	 * requirement between reading hi/lo and starting a mult/div.
	 * Leaving it out may cause gas to insert a nop itself breaking
	 * the desired alignment of the next chunk.
	 */
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"mult %2, %3\n\t"
		"dsll32 %0, %4, %5\n\t"
		"mflo $0\n\t"
		"dsll32 %1, %4, %5\n\t"
		"nop\n\t"
		".set pop"
		: "=&r" (lv1), "=r" (lw)
		: "r" (m1), "r" (m2), "r" (s), "I" (0)
		: "hi", "lo", GCC_REG_ACCUM);
	/* We have to use single integers for m1 and m2 and a double
	 * one for p to be sure the mulsidi3 gcc's RTL multiplication
	 * instruction has the workaround applied.  Older versions of
	 * gcc have correct umulsi3 and mulsi3, but other
	 * multiplication variants lack the workaround.
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (m1), "1" (m2), "2" (s));
	align_mod(align, mod);
	p = m1 * m2;
	lv2 = s << 32;
	asm volatile(
		""
		: "=r" (lv2)
		: "0" (lv2), "r" (p));
	local_irq_restore(flags);

	*v1 = lv1;
	*v2 = lv2;
	*w = lw;
}

/*
 * Probe for the R4000 multiply/shift erratum.  Prints the result;
 * panics only if the bug is present and the compiled-in workaround
 * (R4000_WAR) does not mask it.
 */
static inline void check_mult_sh(void)
{
	long v1[8], v2[8], w[8];
	int bug, fix, i;

	printk("Checking for the multiply/shift bug... ");

	/*
	 * Testing discovered false negatives for certain code offsets
	 * into cache lines.  Hence we test all possible offsets for
	 * the worst assumption of an R4000 I-cache line width of 32
	 * bytes.
	 *
	 * We can't use a loop as alignment directives need to be
	 * immediates.
	 */
	mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
	mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
	mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
	mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
	mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
	mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
	mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
	mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);

	/* any mismatch against the expected value w[] means the bug hit */
	bug = 0;
	for (i = 0; i < 8; i++)
		if (v1[i] != w[i])
			bug = 1;

	if (bug == 0) {
		printk("no.\n");
		return;
	}

	printk("yes, workaround... ");

	/* the workaround is usable only if the second variant matched */
	fix = 1;
	for (i = 0; i < 8; i++)
		if (v2[i] != w[i])
			fix = 0;

	if (fix == 1) {
		printk("yes.\n");
		return;
	}

	printk("no.\n");
	panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}

/* Set by the overflow-exception probe handler below. */
static volatile int daddi_ov __cpuinitdata;

/*
 * Exception handler installed on vector 12 while probing: records
 * that the integer-overflow exception did fire and skips the
 * offending instruction.
 */
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
	daddi_ov = 1;
	regs->cp0_epc += 4;
}

/*
 * Probe for the daddi erratum by executing an overflowing daddi with
 * a temporary overflow handler installed.  Panics if the bug is
 * present and neither the erratum nor the DADDI_WAR workaround makes
 * the exception fire.
 */
static inline void check_daddi(void)
{
	extern asmlinkage void handle_daddi_ov(void);
	unsigned long flags;
	void *handler;
	long v, tmp;

	printk("Checking for the daddi bug... ");

	local_irq_save(flags);
	handler = set_except_vector(12, handle_daddi_ov);
	/*
	 * The following code fails to trigger an overflow exception
	 * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
	 * 00000430, respectively).
	 *
	 * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
	 * 3.0" by MIPS Technologies, Inc., erratum #23 for details.
	 * I got no permission to duplicate it here, sigh... --macro
	 */
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"addiu %1, $0, %2\n\t"
		"dsrl %1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set daddi\n\t"
#endif
		"daddi %0, %1, %3\n\t"
		".set pop"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(12, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		printk("no.\n");
		return;
	}

	printk("yes, workaround... ");

	local_irq_save(flags);
	handler = set_except_vector(12, handle_daddi_ov);
	asm volatile(
		"addiu %1, $0, %2\n\t"
		"dsrl %1, %1, 1\n\t"
		"daddi %0, %1, %3"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(12, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		printk("yes.\n");
		return;
	}

	printk("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

/* Exported result of the daddiu probe: -1 = not run, 0 = ok, 1 = buggy. */
int daddiu_bug = -1;

/*
 * Probe for the daddiu erratum by comparing a daddiu result against
 * the equivalent addiu+daddu sequence.  Sets daddiu_bug; panics when
 * the bug is present and DADDI_WAR doesn't cover it.
 */
static inline void check_daddiu(void)
{
	long v, w, tmp;

	printk("Checking for the daddiu bug... ");

	/*
	 * The following code leads to a wrong result of daddiu when
	 * executed on R4400 rev. 1.0 (PRId 00000440).
	 *
	 * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
	 * MIPS Technologies, Inc., erratum #7 for details.
	 *
	 * According to "MIPS R4000PC/SC Errata, Processor Revision
	 * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
	 * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
	 * 00000430, respectively), too.  Testing failed to trigger it
	 * so far.
	 *
	 * I got no permission to duplicate the errata here, sigh...
	 * --macro
	 */
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"addiu %2, $0, %3\n\t"
		"dsrl %2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set daddi\n\t"
#endif
		"daddiu %0, %2, %4\n\t"
		"addiu %1, $0, %4\n\t"
		"daddu %1, %2\n\t"
		".set pop"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	daddiu_bug = v != w;

	if (!daddiu_bug) {
		printk("no.\n");
		return;
	}

	printk("yes, workaround... ");

	asm volatile(
		"addiu %2, $0, %3\n\t"
		"dsrl %2, %2, 1\n\t"
		"daddiu %0, %2, %4\n\t"
		"addiu %1, $0, %4\n\t"
		"daddu %1, %2"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	if (v == w) {
		printk("yes.\n");
		return;
	}

	printk("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

/* Probes that must run before the exception vectors are finalized. */
void __init check_bugs64_early(void)
{
	check_mult_sh();
	check_daddiu();
}

/* Probe that needs the trap machinery fully set up. */
void __init check_bugs64(void)
{
	check_daddi();
}
gpl-2.0
tuxkids/kernel_ics
drivers/misc/lkdtm.c
753
15107
/* * Kprobe module for testing crash dumps * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2006 * * Author: Ankita Garg <ankita@in.ibm.com> * * This module induces system failures at predefined crashpoints to * evaluate the reliability of crash dumps obtained using different dumping * solutions. * * It is adapted from the Linux Kernel Dump Test Tool by * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> * * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net> * * See Documentation/fault-injection/provoke-crashes.txt for instructions */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/buffer_head.h> #include <linux/kprobes.h> #include <linux/list.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hrtimer.h> #include <linux/slab.h> #include <scsi/scsi_cmnd.h> #include <linux/debugfs.h> #ifdef CONFIG_IDE #include <linux/ide.h> #endif #define DEFAULT_COUNT 10 #define REC_NUM_DEFAULT 10 enum cname { INVALID, INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY, FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD, IDE_CORE_CP, DIRECT, }; enum ctype { NONE, PANIC, BUG, EXCEPTION, LOOP, OVERFLOW, CORRUPT_STACK, UNALIGNED_LOAD_STORE_WRITE, OVERWRITE_ALLOCATION, WRITE_AFTER_FREE, SOFTLOCKUP, HARDLOCKUP, HUNG_TASK, 
}; static char* cp_name[] = { "INT_HARDWARE_ENTRY", "INT_HW_IRQ_EN", "INT_TASKLET_ENTRY", "FS_DEVRW", "MEM_SWAPOUT", "TIMERADD", "SCSI_DISPATCH_CMD", "IDE_CORE_CP", "DIRECT", }; static char* cp_type[] = { "PANIC", "BUG", "EXCEPTION", "LOOP", "OVERFLOW", "CORRUPT_STACK", "UNALIGNED_LOAD_STORE_WRITE", "OVERWRITE_ALLOCATION", "WRITE_AFTER_FREE", "SOFTLOCKUP", "HARDLOCKUP", "HUNG_TASK", }; static struct jprobe lkdtm; static int lkdtm_parse_commandline(void); static void lkdtm_handler(void); static char* cpoint_name; static char* cpoint_type; static int cpoint_count = DEFAULT_COUNT; static int recur_count = REC_NUM_DEFAULT; static enum cname cpoint = INVALID; static enum ctype cptype = NONE; static int count = DEFAULT_COUNT; module_param(recur_count, int, 0644); MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ "default is 10"); module_param(cpoint_name, charp, 0644); MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed"); module_param(cpoint_type, charp, 0644); MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\ "hitting the crash point"); module_param(cpoint_count, int, 0644); MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\ "crash point is to be hit to trigger action"); static unsigned int jp_do_irq(unsigned int irq) { lkdtm_handler(); jprobe_return(); return 0; } static irqreturn_t jp_handle_irq_event(unsigned int irq, struct irqaction *action) { lkdtm_handler(); jprobe_return(); return 0; } static void jp_tasklet_action(struct softirq_action *a) { lkdtm_handler(); jprobe_return(); } static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) { lkdtm_handler(); jprobe_return(); } struct scan_control; static unsigned long jp_shrink_inactive_list(unsigned long max_scan, struct zone *zone, struct scan_control *sc) { lkdtm_handler(); jprobe_return(); return 0; } static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) { 
lkdtm_handler(); jprobe_return(); return 0; } static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd) { lkdtm_handler(); jprobe_return(); return 0; } #ifdef CONFIG_IDE int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev, unsigned int cmd, unsigned long arg) { lkdtm_handler(); jprobe_return(); return 0; } #endif /* Return the crashpoint number or NONE if the name is invalid */ static enum ctype parse_cp_type(const char *what, size_t count) { int i; for (i = 0; i < ARRAY_SIZE(cp_type); i++) { if (!strcmp(what, cp_type[i])) return i + 1; } return NONE; } static const char *cp_type_to_str(enum ctype type) { if (type == NONE || type < 0 || type > ARRAY_SIZE(cp_type)) return "None"; return cp_type[type - 1]; } static const char *cp_name_to_str(enum cname name) { if (name == INVALID || name < 0 || name > ARRAY_SIZE(cp_name)) return "INVALID"; return cp_name[name - 1]; } static int lkdtm_parse_commandline(void) { int i; if (cpoint_count < 1 || recur_count < 1) return -EINVAL; count = cpoint_count; /* No special parameters */ if (!cpoint_type && !cpoint_name) return 0; /* Neither or both of these need to be set */ if (!cpoint_type || !cpoint_name) return -EINVAL; cptype = parse_cp_type(cpoint_type, strlen(cpoint_type)); if (cptype == NONE) return -EINVAL; for (i = 0; i < ARRAY_SIZE(cp_name); i++) { if (!strcmp(cpoint_name, cp_name[i])) { cpoint = i + 1; return 0; } } /* Could not find a valid crash point */ return -EINVAL; } static int recursive_loop(int a) { char buf[1024]; memset(buf,0xFF,1024); recur_count--; if (!recur_count) return 0; else return recursive_loop(a); } static void lkdtm_do_action(enum ctype which) { switch (which) { case PANIC: panic("dumptest"); break; case BUG: BUG(); break; case EXCEPTION: *((int *) 0) = 0; break; case LOOP: for (;;) ; break; case OVERFLOW: (void) recursive_loop(0); break; case CORRUPT_STACK: { volatile u32 data[8]; volatile u32 *p = data; p[12] = 0x12345678; break; } case 
UNALIGNED_LOAD_STORE_WRITE: { static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5}; u32 *p; u32 val = 0x12345678; p = (u32 *)(data + 1); if (*p == 0) val = 0x87654321; *p = val; break; } case OVERWRITE_ALLOCATION: { size_t len = 1020; u32 *data = kmalloc(len, GFP_KERNEL); data[1024 / sizeof(u32)] = 0x12345678; kfree(data); break; } case WRITE_AFTER_FREE: { size_t len = 1024; u32 *data = kmalloc(len, GFP_KERNEL); kfree(data); schedule(); memset(data, 0x78, len); break; } case SOFTLOCKUP: preempt_disable(); for (;;) cpu_relax(); break; case HARDLOCKUP: local_irq_disable(); for (;;) cpu_relax(); break; case HUNG_TASK: set_current_state(TASK_UNINTERRUPTIBLE); schedule(); break; case NONE: default: break; } } static void lkdtm_handler(void) { count--; printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", cp_name_to_str(cpoint), cp_type_to_str(cptype), count); if (count == 0) { lkdtm_do_action(cptype); count = cpoint_count; } } static int lkdtm_register_cpoint(enum cname which) { int ret; cpoint = INVALID; if (lkdtm.entry != NULL) unregister_jprobe(&lkdtm); switch (which) { case DIRECT: lkdtm_do_action(cptype); return 0; case INT_HARDWARE_ENTRY: lkdtm.kp.symbol_name = "do_IRQ"; lkdtm.entry = (kprobe_opcode_t*) jp_do_irq; break; case INT_HW_IRQ_EN: lkdtm.kp.symbol_name = "handle_IRQ_event"; lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event; break; case INT_TASKLET_ENTRY: lkdtm.kp.symbol_name = "tasklet_action"; lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action; break; case FS_DEVRW: lkdtm.kp.symbol_name = "ll_rw_block"; lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block; break; case MEM_SWAPOUT: lkdtm.kp.symbol_name = "shrink_inactive_list"; lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list; break; case TIMERADD: lkdtm.kp.symbol_name = "hrtimer_start"; lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start; break; case SCSI_DISPATCH_CMD: lkdtm.kp.symbol_name = "scsi_dispatch_cmd"; lkdtm.entry = (kprobe_opcode_t*) 
jp_scsi_dispatch_cmd; break; case IDE_CORE_CP: #ifdef CONFIG_IDE lkdtm.kp.symbol_name = "generic_ide_ioctl"; lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; #else printk(KERN_INFO "lkdtm: Crash point not available\n"); return -EINVAL; #endif break; default: printk(KERN_INFO "lkdtm: Invalid Crash Point\n"); return -EINVAL; } cpoint = which; if ((ret = register_jprobe(&lkdtm)) < 0) { printk(KERN_INFO "lkdtm: Couldn't register jprobe\n"); cpoint = INVALID; } return ret; } static ssize_t do_register_entry(enum cname which, struct file *f, const char __user *user_buf, size_t count, loff_t *off) { char *buf; int err; if (count >= PAGE_SIZE) return -EINVAL; buf = (char *)__get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, user_buf, count)) { free_page((unsigned long) buf); return -EFAULT; } /* NULL-terminate and remove enter */ buf[count] = '\0'; strim(buf); cptype = parse_cp_type(buf, count); free_page((unsigned long) buf); if (cptype == NONE) return -EINVAL; err = lkdtm_register_cpoint(which); if (err < 0) return err; *off += count; return count; } /* Generic read callback that just prints out the available crash types */ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, size_t count, loff_t *off) { char *buf; int i, n, out; buf = (char *)__get_free_page(GFP_KERNEL); n = snprintf(buf, PAGE_SIZE, "Available crash types:\n"); for (i = 0; i < ARRAY_SIZE(cp_type); i++) n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]); buf[n] = '\0'; out = simple_read_from_buffer(user_buf, count, off, buf, n); free_page((unsigned long) buf); return out; } static int lkdtm_debugfs_open(struct inode *inode, struct file *file) { return 0; } static ssize_t int_hardware_entry(struct file *f, const char __user *buf, size_t count, loff_t *off) { return do_register_entry(INT_HARDWARE_ENTRY, f, buf, count, off); } static ssize_t int_hw_irq_en(struct file *f, const char __user *buf, size_t count, loff_t *off) { return 
do_register_entry(INT_HW_IRQ_EN, f, buf, count, off); } static ssize_t int_tasklet_entry(struct file *f, const char __user *buf, size_t count, loff_t *off) { return do_register_entry(INT_TASKLET_ENTRY, f, buf, count, off); } static ssize_t fs_devrw_entry(struct file *f, const char __user *buf, size_t count, loff_t *off) { return do_register_entry(FS_DEVRW, f, buf, count, off); } static ssize_t mem_swapout_entry(struct file *f, const char __user *buf, size_t count, loff_t *off) { return do_register_entry(MEM_SWAPOUT, f, buf, count, off); } static ssize_t timeradd_entry(struct file *f, const char __user *buf, size_t count, loff_t *off) { return do_register_entry(TIMERADD, f, buf, count, off); } static ssize_t scsi_dispatch_cmd_entry(struct file *f, const char __user *buf, size_t count, loff_t *off) { return do_register_entry(SCSI_DISPATCH_CMD, f, buf, count, off); } static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf, size_t count, loff_t *off) { return do_register_entry(IDE_CORE_CP, f, buf, count, off); } /* Special entry to just crash directly. 
Available without KPROBEs */ static ssize_t direct_entry(struct file *f, const char __user *user_buf, size_t count, loff_t *off) { enum ctype type; char *buf; if (count >= PAGE_SIZE) return -EINVAL; if (count < 1) return -EINVAL; buf = (char *)__get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, user_buf, count)) { free_page((unsigned long) buf); return -EFAULT; } /* NULL-terminate and remove enter */ buf[count] = '\0'; strim(buf); type = parse_cp_type(buf, count); free_page((unsigned long) buf); if (type == NONE) return -EINVAL; printk(KERN_INFO "lkdtm: Performing direct entry %s\n", cp_type_to_str(type)); lkdtm_do_action(type); *off += count; return count; } struct crash_entry { const char *name; const struct file_operations fops; }; static const struct crash_entry crash_entries[] = { {"DIRECT", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = direct_entry} }, {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = int_hardware_entry} }, {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = int_hw_irq_en} }, {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = int_tasklet_entry} }, {"FS_DEVRW", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = fs_devrw_entry} }, {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = mem_swapout_entry} }, {"TIMERADD", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = timeradd_entry} }, {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = scsi_dispatch_cmd_entry} }, {"IDE_CORE_CP", {.read = lkdtm_debugfs_read, .open = lkdtm_debugfs_open, .write = ide_core_cp_entry} }, }; static struct dentry *lkdtm_debugfs_root; static int __init lkdtm_module_init(void) { int ret = -EINVAL; int n_debugfs_entries = 1; /* Assume only the direct entry */ int i; /* Register debugfs interface */ lkdtm_debugfs_root = 
debugfs_create_dir("provoke-crash", NULL); if (!lkdtm_debugfs_root) { printk(KERN_ERR "lkdtm: creating root dir failed\n"); return -ENODEV; } #ifdef CONFIG_KPROBES n_debugfs_entries = ARRAY_SIZE(crash_entries); #endif for (i = 0; i < n_debugfs_entries; i++) { const struct crash_entry *cur = &crash_entries[i]; struct dentry *de; de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, NULL, &cur->fops); if (de == NULL) { printk(KERN_ERR "lkdtm: could not create %s\n", cur->name); goto out_err; } } if (lkdtm_parse_commandline() == -EINVAL) { printk(KERN_INFO "lkdtm: Invalid command\n"); goto out_err; } if (cpoint != INVALID && cptype != NONE) { ret = lkdtm_register_cpoint(cpoint); if (ret < 0) { printk(KERN_INFO "lkdtm: Invalid crash point %d\n", cpoint); goto out_err; } printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n", cpoint_name, cpoint_type); } else { printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n"); } return 0; out_err: debugfs_remove_recursive(lkdtm_debugfs_root); return ret; } static void __exit lkdtm_module_exit(void) { debugfs_remove_recursive(lkdtm_debugfs_root); unregister_jprobe(&lkdtm); printk(KERN_INFO "lkdtm: Crash point unregistered\n"); } module_init(lkdtm_module_init); module_exit(lkdtm_module_exit); MODULE_LICENSE("GPL");
gpl-2.0
skritchz/msm_2.6.38
fs/ecryptfs/read_write.c
1009
11629
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/fs.h> #include <linux/pagemap.h> #include "ecryptfs_kernel.h" /** * ecryptfs_write_lower * @ecryptfs_inode: The eCryptfs inode * @data: Data to write * @offset: Byte offset in the lower file to which to write the data * @size: Number of bytes from @data to write at @offset in the lower * file * * Write data to the lower file. 
* * Returns bytes written on success; less than zero on error */ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, loff_t offset, size_t size) { struct ecryptfs_inode_info *inode_info; mm_segment_t fs_save; ssize_t rc; inode_info = ecryptfs_inode_to_private(ecryptfs_inode); mutex_lock(&inode_info->lower_file_mutex); BUG_ON(!inode_info->lower_file); inode_info->lower_file->f_pos = offset; fs_save = get_fs(); set_fs(get_ds()); rc = vfs_write(inode_info->lower_file, data, size, &inode_info->lower_file->f_pos); set_fs(fs_save); mutex_unlock(&inode_info->lower_file_mutex); mark_inode_dirty_sync(ecryptfs_inode); return rc; } /** * ecryptfs_write_lower_page_segment * @ecryptfs_inode: The eCryptfs inode * @page_for_lower: The page containing the data to be written to the * lower file * @offset_in_page: The offset in the @page_for_lower from which to * start writing the data * @size: The amount of data from @page_for_lower to write to the * lower file * * Determines the byte offset in the file for the given page and * offset within the page, maps the page, and makes the call to write * the contents of @page_for_lower to the lower inode. * * Returns zero on success; non-zero otherwise */ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode, struct page *page_for_lower, size_t offset_in_page, size_t size) { char *virt; loff_t offset; int rc; offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT) + offset_in_page); virt = kmap(page_for_lower); rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size); if (rc > 0) rc = 0; kunmap(page_for_lower); return rc; } /** * ecryptfs_write * @ecryptfs_inode: The eCryptfs file into which to write * @data: Virtual address where data to write is located * @offset: Offset in the eCryptfs file at which to begin writing the * data from @data * @size: The number of bytes to write from @data * * Write an arbitrary amount of data to an arbitrary location in the * eCryptfs inode page cache. 
This is done on a page-by-page, and then * by an extent-by-extent, basis; individual extents are encrypted and * written to the lower page cache (via VFS writes). This function * takes care of all the address translation to locations in the lower * filesystem; it also handles truncate events, writing out zeros * where necessary. * * Returns zero on success; non-zero otherwise */ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, size_t size) { struct page *ecryptfs_page; struct ecryptfs_crypt_stat *crypt_stat; char *ecryptfs_page_virt; loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode); loff_t data_offset = 0; loff_t pos; int rc = 0; crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat; /* * if we are writing beyond current size, then start pos * at the current size - we'll fill in zeros from there. */ if (offset > ecryptfs_file_size) pos = ecryptfs_file_size; else pos = offset; while (pos < (offset + size)) { pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); size_t total_remaining_bytes = ((offset + size) - pos); if (num_bytes > total_remaining_bytes) num_bytes = total_remaining_bytes; if (pos < offset) { /* remaining zeros to write, up to destination offset */ size_t total_remaining_zeros = (offset - pos); if (num_bytes > total_remaining_zeros) num_bytes = total_remaining_zeros; } ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode, ecryptfs_page_idx); if (IS_ERR(ecryptfs_page)) { rc = PTR_ERR(ecryptfs_page); printk(KERN_ERR "%s: Error getting page at " "index [%ld] from eCryptfs inode " "mapping; rc = [%d]\n", __func__, ecryptfs_page_idx, rc); goto out; } ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0); /* * pos: where we're now writing, offset: where the request was * If current pos is before request, we are filling zeros * If we are at or beyond request, we are writing the *data* * 
If we're in a fresh page beyond eof, zero it in either case */ if (pos < offset || !start_offset_in_page) { /* We are extending past the previous end of the file. * Fill in zero values to the end of the page */ memset(((char *)ecryptfs_page_virt + start_offset_in_page), 0, PAGE_CACHE_SIZE - start_offset_in_page); } /* pos >= offset, we are now writing the data request */ if (pos >= offset) { memcpy(((char *)ecryptfs_page_virt + start_offset_in_page), (data + data_offset), num_bytes); data_offset += num_bytes; } kunmap_atomic(ecryptfs_page_virt, KM_USER0); flush_dcache_page(ecryptfs_page); SetPageUptodate(ecryptfs_page); unlock_page(ecryptfs_page); if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) rc = ecryptfs_encrypt_page(ecryptfs_page); else rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, ecryptfs_page, start_offset_in_page, data_offset); page_cache_release(ecryptfs_page); if (rc) { printk(KERN_ERR "%s: Error encrypting " "page; rc = [%d]\n", __func__, rc); goto out; } pos += num_bytes; } if ((offset + size) > ecryptfs_file_size) { i_size_write(ecryptfs_inode, (offset + size)); if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) { rc = ecryptfs_write_inode_size_to_metadata( ecryptfs_inode); if (rc) { printk(KERN_ERR "Problem with " "ecryptfs_write_inode_size_to_metadata; " "rc = [%d]\n", rc); goto out; } } } out: return rc; } /** * ecryptfs_read_lower * @data: The read data is stored here by this function * @offset: Byte offset in the lower file from which to read the data * @size: Number of bytes to read from @offset of the lower file and * store into @data * @ecryptfs_inode: The eCryptfs inode * * Read @size bytes of data at byte offset @offset from the lower * inode into memory location @data. 
* * Returns bytes read on success; 0 on EOF; less than zero on error */ int ecryptfs_read_lower(char *data, loff_t offset, size_t size, struct inode *ecryptfs_inode) { struct ecryptfs_inode_info *inode_info = ecryptfs_inode_to_private(ecryptfs_inode); mm_segment_t fs_save; ssize_t rc; mutex_lock(&inode_info->lower_file_mutex); BUG_ON(!inode_info->lower_file); inode_info->lower_file->f_pos = offset; fs_save = get_fs(); set_fs(get_ds()); rc = vfs_read(inode_info->lower_file, data, size, &inode_info->lower_file->f_pos); set_fs(fs_save); mutex_unlock(&inode_info->lower_file_mutex); return rc; } /** * ecryptfs_read_lower_page_segment * @page_for_ecryptfs: The page into which data for eCryptfs will be * written * @offset_in_page: Offset in @page_for_ecryptfs from which to start * writing * @size: The number of bytes to write into @page_for_ecryptfs * @ecryptfs_inode: The eCryptfs inode * * Determines the byte offset in the file for the given page and * offset within the page, maps the page, and makes the call to read * the contents of @page_for_ecryptfs from the lower inode. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, pgoff_t page_index, size_t offset_in_page, size_t size, struct inode *ecryptfs_inode) { char *virt; loff_t offset; int rc; offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page); virt = kmap(page_for_ecryptfs); rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode); if (rc > 0) rc = 0; kunmap(page_for_ecryptfs); flush_dcache_page(page_for_ecryptfs); return rc; } #if 0 /** * ecryptfs_read * @data: The virtual address into which to write the data read (and * possibly decrypted) from the lower file * @offset: The offset in the decrypted view of the file from which to * read into @data * @size: The number of bytes to read into @data * @ecryptfs_file: The eCryptfs file from which to read * * Read an arbitrary amount of data from an arbitrary location in the * eCryptfs page cache. This is done on an extent-by-extent basis; * individual extents are decrypted and read from the lower page * cache (via VFS reads). This function takes care of all the * address translation to locations in the lower filesystem. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_read(char *data, loff_t offset, size_t size, struct file *ecryptfs_file) { struct inode *ecryptfs_inode = ecryptfs_file->f_dentry->d_inode; struct page *ecryptfs_page; char *ecryptfs_page_virt; loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode); loff_t data_offset = 0; loff_t pos; int rc = 0; if ((offset + size) > ecryptfs_file_size) { rc = -EINVAL; printk(KERN_ERR "%s: Attempt to read data past the end of the " "file; offset = [%lld]; size = [%td]; " "ecryptfs_file_size = [%lld]\n", __func__, offset, size, ecryptfs_file_size); goto out; } pos = offset; while (pos < (offset + size)) { pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); size_t total_remaining_bytes = ((offset + size) - pos); if (num_bytes > total_remaining_bytes) num_bytes = total_remaining_bytes; ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode, ecryptfs_page_idx); if (IS_ERR(ecryptfs_page)) { rc = PTR_ERR(ecryptfs_page); printk(KERN_ERR "%s: Error getting page at " "index [%ld] from eCryptfs inode " "mapping; rc = [%d]\n", __func__, ecryptfs_page_idx, rc); goto out; } ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0); memcpy((data + data_offset), ((char *)ecryptfs_page_virt + start_offset_in_page), num_bytes); kunmap_atomic(ecryptfs_page_virt, KM_USER0); flush_dcache_page(ecryptfs_page); SetPageUptodate(ecryptfs_page); unlock_page(ecryptfs_page); page_cache_release(ecryptfs_page); pos += num_bytes; data_offset += num_bytes; } out: return rc; } #endif /* 0 */
gpl-2.0
focuschou/android_kernel_samsung_piranha
drivers/s390/net/netiucv.c
2801
58371
/* * IUCV network driver * * Copyright IBM Corp. 2001, 2009 * * Author(s): * Original netiucv driver: * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) * Sysfs integration and all bugs therein: * Cornelia Huck (cornelia.huck@de.ibm.com) * PM functions: * Ursula Braun (ursula.braun@de.ibm.com) * * Documentation used: * the source of the original IUCV driver by: * Stefan Hegewald <hegewald@de.ibm.com> * Hartmut Penner <hpenner@de.ibm.com> * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #define KMSG_COMPONENT "netiucv" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #undef DEBUG #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/bitops.h> #include <linux/signal.h> #include <linux/string.h> #include <linux/device.h> #include <linux/ip.h> #include <linux/if_arp.h> #include <linux/tcp.h> #include <linux/skbuff.h> #include <linux/ctype.h> #include <net/dst.h> #include <asm/io.h> #include <asm/uaccess.h> #include <net/iucv/iucv.h> #include "fsm.h" MODULE_AUTHOR ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); /** * Debug Facility stuff */ #define IUCV_DBF_SETUP_NAME "iucv_setup" #define IUCV_DBF_SETUP_LEN 32 #define IUCV_DBF_SETUP_PAGES 2 #define IUCV_DBF_SETUP_NR_AREAS 1 #define IUCV_DBF_SETUP_LEVEL 3 #define IUCV_DBF_DATA_NAME "iucv_data" #define IUCV_DBF_DATA_LEN 128 #define IUCV_DBF_DATA_PAGES 2 #define IUCV_DBF_DATA_NR_AREAS 1 #define IUCV_DBF_DATA_LEVEL 2 #define IUCV_DBF_TRACE_NAME "iucv_trace" #define IUCV_DBF_TRACE_LEN 16 #define IUCV_DBF_TRACE_PAGES 4 #define IUCV_DBF_TRACE_NR_AREAS 1 #define IUCV_DBF_TRACE_LEVEL 3 #define IUCV_DBF_TEXT(name,level,text) \ do { \ debug_text_event(iucv_dbf_##name,level,text); \ } while (0) #define IUCV_DBF_HEX(name,level,addr,len) \ do { \ debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ } while (0) DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); /* Allow to sort out low debug levels early to avoid wasted sprints */ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level) { return (level <= dbf_grp->level); } #define IUCV_DBF_TEXT_(name, level, text...) 
\ do { \ if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ char* __buf = get_cpu_var(iucv_dbf_txt_buf); \ sprintf(__buf, text); \ debug_text_event(iucv_dbf_##name, level, __buf); \ put_cpu_var(iucv_dbf_txt_buf); \ } \ } while (0) #define IUCV_DBF_SPRINTF(name,level,text...) \ do { \ debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ debug_sprintf_event(iucv_dbf_trace, level, text ); \ } while (0) /** * some more debug stuff */ #define IUCV_HEXDUMP16(importance,header,ptr) \ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ *(((char*)ptr)+12),*(((char*)ptr)+13), \ *(((char*)ptr)+14),*(((char*)ptr)+15)); \ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ *(((char*)ptr)+16),*(((char*)ptr)+17), \ *(((char*)ptr)+18),*(((char*)ptr)+19), \ *(((char*)ptr)+20),*(((char*)ptr)+21), \ *(((char*)ptr)+22),*(((char*)ptr)+23), \ *(((char*)ptr)+24),*(((char*)ptr)+25), \ *(((char*)ptr)+26),*(((char*)ptr)+27), \ *(((char*)ptr)+28),*(((char*)ptr)+29), \ *(((char*)ptr)+30),*(((char*)ptr)+31)); #define PRINTK_HEADER " iucv: " /* for debugging */ /* dummy device to make sure netiucv_pm functions are called */ static struct device *netiucv_dev; static int netiucv_pm_prepare(struct device *); static void netiucv_pm_complete(struct device *); static int netiucv_pm_freeze(struct device *); static int netiucv_pm_restore_thaw(struct device *); static const struct dev_pm_ops netiucv_pm_ops = { .prepare = netiucv_pm_prepare, .complete = netiucv_pm_complete, .freeze = netiucv_pm_freeze, .thaw = netiucv_pm_restore_thaw, .restore = netiucv_pm_restore_thaw, }; static struct device_driver netiucv_driver = { .owner = THIS_MODULE, .name = 
"netiucv", .bus = &iucv_bus, .pm = &netiucv_pm_ops, }; static int netiucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); static struct iucv_handler netiucv_handler = { .path_pending = netiucv_callback_connreq, .path_complete = netiucv_callback_connack, .path_severed = netiucv_callback_connrej, .path_quiesced = netiucv_callback_connsusp, .path_resumed = netiucv_callback_connres, .message_pending = netiucv_callback_rx, .message_complete = netiucv_callback_txdone }; /** * Per connection profiling data */ struct connection_profile { unsigned long maxmulti; unsigned long maxcqueue; unsigned long doios_single; unsigned long doios_multi; unsigned long txlen; unsigned long tx_time; struct timespec send_stamp; unsigned long tx_pending; unsigned long tx_max_pending; }; /** * Representation of one iucv connection */ struct iucv_connection { struct list_head list; struct iucv_path *path; struct sk_buff *rx_buff; struct sk_buff *tx_buff; struct sk_buff_head collect_queue; struct sk_buff_head commit_queue; spinlock_t collect_lock; int collect_len; int max_buffsize; fsm_timer timer; fsm_instance *fsm; struct net_device *netdev; struct connection_profile prof; char userid[9]; }; /** * Linked list of all connection structs. */ static LIST_HEAD(iucv_connection_list); static DEFINE_RWLOCK(iucv_connection_rwlock); /** * Representation of event-data for the * connection state machine. 
*/ struct iucv_event { struct iucv_connection *conn; void *data; }; /** * Private part of the network device structure */ struct netiucv_priv { struct net_device_stats stats; unsigned long tbusy; fsm_instance *fsm; struct iucv_connection *conn; struct device *dev; int pm_state; }; /** * Link level header for a packet. */ struct ll_header { u16 next; }; #define NETIUCV_HDRLEN (sizeof(struct ll_header)) #define NETIUCV_BUFSIZE_MAX 32768 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) #define NETIUCV_MTU_DEFAULT 9216 #define NETIUCV_QUEUELEN_DEFAULT 50 #define NETIUCV_TIMEOUT_5SEC 5000 /** * Compatibility macros for busy handling * of network devices. */ static inline void netiucv_clear_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); clear_bit(0, &priv->tbusy); netif_wake_queue(dev); } static inline int netiucv_test_and_set_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); netif_stop_queue(dev); return test_and_set_bit(0, &priv->tbusy); } static u8 iucvMagic[16] = { 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; /** * Convert an iucv userId to its printable * form (strip whitespace at end). * * @param An iucv userId * * @returns The printable string (static data!!) */ static char *netiucv_printname(char *name) { static char tmp[9]; char *p = tmp; memcpy(tmp, name, 8); tmp[8] = '\0'; while (*p && (!isspace(*p))) p++; *p = '\0'; return tmp; } /** * States of the interface statemachine. */ enum dev_states { DEV_STATE_STOPPED, DEV_STATE_STARTWAIT, DEV_STATE_STOPWAIT, DEV_STATE_RUNNING, /** * MUST be always the last element!! */ NR_DEV_STATES }; static const char *dev_state_names[] = { "Stopped", "StartWait", "StopWait", "Running", }; /** * Events of the interface statemachine. 
*/ enum dev_events { DEV_EVENT_START, DEV_EVENT_STOP, DEV_EVENT_CONUP, DEV_EVENT_CONDOWN, /** * MUST be always the last element!! */ NR_DEV_EVENTS }; static const char *dev_event_names[] = { "Start", "Stop", "Connection up", "Connection down", }; /** * Events of the connection statemachine */ enum conn_events { /** * Events, representing callbacks from * lowlevel iucv layer) */ CONN_EVENT_CONN_REQ, CONN_EVENT_CONN_ACK, CONN_EVENT_CONN_REJ, CONN_EVENT_CONN_SUS, CONN_EVENT_CONN_RES, CONN_EVENT_RX, CONN_EVENT_TXDONE, /** * Events, representing errors return codes from * calls to lowlevel iucv layer */ /** * Event, representing timer expiry. */ CONN_EVENT_TIMER, /** * Events, representing commands from upper levels. */ CONN_EVENT_START, CONN_EVENT_STOP, /** * MUST be always the last element!! */ NR_CONN_EVENTS, }; static const char *conn_event_names[] = { "Remote connection request", "Remote connection acknowledge", "Remote connection reject", "Connection suspended", "Connection resumed", "Data received", "Data sent", "Timer", "Start", "Stop", }; /** * States of the connection statemachine. */ enum conn_states { /** * Connection not assigned to any device, * initial state, invalid */ CONN_STATE_INVALID, /** * Userid assigned but not operating */ CONN_STATE_STOPPED, /** * Connection registered, * no connection request sent yet, * no connection request received */ CONN_STATE_STARTWAIT, /** * Connection registered and connection request sent, * no acknowledge and no connection request received yet. */ CONN_STATE_SETUPWAIT, /** * Connection up and running idle */ CONN_STATE_IDLE, /** * Data sent, awaiting CONN_EVENT_TXDONE */ CONN_STATE_TX, /** * Error during registration. */ CONN_STATE_REGERR, /** * Error during registration. */ CONN_STATE_CONNERR, /** * MUST be always the last element!! 
*/ NR_CONN_STATES, }; static const char *conn_state_names[] = { "Invalid", "Stopped", "StartWait", "SetupWait", "Idle", "TX", "Terminating", "Registration error", "Connect error", }; /** * Debug Facility Stuff */ static debug_info_t *iucv_dbf_setup = NULL; static debug_info_t *iucv_dbf_data = NULL; static debug_info_t *iucv_dbf_trace = NULL; DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); static void iucv_unregister_dbf_views(void) { if (iucv_dbf_setup) debug_unregister(iucv_dbf_setup); if (iucv_dbf_data) debug_unregister(iucv_dbf_data); if (iucv_dbf_trace) debug_unregister(iucv_dbf_trace); } static int iucv_register_dbf_views(void) { iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, IUCV_DBF_SETUP_PAGES, IUCV_DBF_SETUP_NR_AREAS, IUCV_DBF_SETUP_LEN); iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME, IUCV_DBF_DATA_PAGES, IUCV_DBF_DATA_NR_AREAS, IUCV_DBF_DATA_LEN); iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME, IUCV_DBF_TRACE_PAGES, IUCV_DBF_TRACE_NR_AREAS, IUCV_DBF_TRACE_LEN); if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) || (iucv_dbf_trace == NULL)) { iucv_unregister_dbf_views(); return -ENOMEM; } debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view); debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL); debug_register_view(iucv_dbf_data, &debug_hex_ascii_view); debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL); debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view); debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL); return 0; } /* * Callback-wrappers, called from lowlevel iucv layer. 
*/

/* RX callback: forward the inbound message to the connection FSM. */
static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}

/* TX-done callback: report message completion to the connection FSM. */
static void netiucv_callback_txdone(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}

/* Peer accepted our connection request. */
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}

/*
 * Incoming connection request from a remote z/VM guest.  Accept it only
 * if the user data matches iucvMagic and a connection for that userid
 * exists in iucv_connection_list.  Returns 0 when a matching connection
 * was found, -EINVAL otherwise.
 */
static int netiucv_callback_connreq(struct iucv_path *path,
				    u8 ipvmid[8], u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	int rc;

	if (memcmp(iucvMagic, ipuser, 16))
		/* ipuser must match iucvMagic. */
		return -EINVAL;
	rc = -EINVAL;
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}

/* Peer rejected (severed) the path. */
static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

/* Peer quiesced the path. */
static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

/* Peer resumed the path. */
static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}

/**
 * NOP action for statemachines
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions of the connection statemachine
 */

/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
*/
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device *dev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	/*
	 * NOTE(review): ntohs() used where htons() would be conventional;
	 * identical on big-endian s390, but worth confirming intent.
	 */
	pskb->protocol = ntohs(ETH_P_IP);

	/* Walk the chained ll_headers; next == 0 terminates the chain. */
	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		/* header->next is cumulative; convert to this packet's length. */
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2,
				       "Illegal next field: %d > %d\n",
				       header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		/* Copy into a fresh skb; pskb is the reusable rx buffer. */
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		/* Advance to the next chained packet in the rx buffer. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}

/*
 * CONN_EVENT_RX action: receive the IUCV message into the connection's
 * rx buffer and unpack it.  Messages that are oversized or arrive for an
 * unlinked connection are rejected back to IUCV.
 */
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	/* Reset the reusable rx buffer to empty before receiving. */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	/* A valid message holds at least one ll_header (length >= 5). */
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}

/*
 * CONN_EVENT_TXDONE action: account the just-completed send, then flush
 * any skbs that piled up on the collect queue as one multi-packet send.
 * Falls back to CONN_STATE_IDLE when there is nothing left to send or
 * the follow-up send failed.
 */
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;	/* tag==1 marks a single-skb send */
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	/*
	 * NOTE(review): conn is NULL-checked here but dereferenced
	 * unconditionally just below — the check looks vestigial; confirm
	 * whether conn can ever be NULL at this point.
	 */
	if (conn && conn->netdev)
		privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	/* Rebuild the shared tx buffer from the collect queue. */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	/* Terminating ll_header with next == 0 ends the chain. */
	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
	       NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	txmsg.class = 0;
	txmsg.tag = 0;	/* tag==0: multi-packet send, no commit_queue entry */
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}

/*
 * Accept an incoming path (CONN_EVENT_CONN_REQ in STARTWAIT/SETUPWAIT)
 * and notify the device FSM that the connection is up.
 */
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}

/* Refuse an incoming path by severing it. */
static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_path *path = ev->data;

	IUCV_DBF_TEXT(trace, 3, __func__);
	iucv_path_sever(path, NULL);
}

/* Our connect was acknowledged: stop the setup timer, go IDLE, raise CONUP. */
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}

/* Setup timer expired while waiting for CONN_ACK: sever and retry-wait. */
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}

/* Remote side dropped the connection: sever, log, and notify the device FSM. */
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, NULL);
	dev_info(privptr->dev, "The peer interface of the IUCV device"
		" has closed the connection\n");
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}

/*
 * CONN_EVENT_START action: allocate a path and try to connect to the
 * peer guest.  Maps the documented IUCV connect return codes (11..15)
 * to operator messages and either STARTWAIT (retryable) or CONNERR
 * (permanent) states.
 */
static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
		netdev->name, conn->userid);

	/*
	 * We must set the state before calling iucv_connect because the
	 * callback handler could be called at any point after the connection
	 * request is sent
	 */

	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
	/* NOTE(review): iucv_path_alloc() result is not NULL-checked here. */
	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, iucvMagic, conn);
	switch (rc) {
	case 0:
		netdev->tx_queue_len = conn->path->msglim;
		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
			     CONN_EVENT_TIMER, conn);
		return;
	case 11:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to z/VM guest %s\n",
			netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 12:
		dev_warn(privptr->dev,
			"The IUCV device failed to connect to the peer on z/VM"
			" guest %s\n", netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		break;
	case 13:
		dev_err(privptr->dev,
			"Connecting the IUCV device would exceed the maximum"
			" number of IUCV connections\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 14:
		dev_err(privptr->dev,
			"z/VM guest %s has too many IUCV connections"
			" to connect with the IUCV device\n",
			netiucv_printname(conn->userid));
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	case 15:
		dev_err(privptr->dev,
			"The IUCV device cannot connect to a z/VM guest with no"
			" IUCV authorization\n");
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	default:
		dev_err(privptr->dev,
			"Connecting the IUCV device failed with error %d\n",
			rc);
		fsm_newstate(fi, CONN_STATE_CONNERR);
		break;
	}
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
	kfree(conn->path);
	conn->path = NULL;
}

/* Drop every skb on @q, releasing the reference taken when it was queued. */
static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/*
 * CONN_EVENT_STOP action: stop the timer, drop queued skbs, sever and
 * free the path, then tell the device FSM the connection is down.
 */
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, iucvMagic);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}

/* Start requested in an invalid state; log it only. */
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		netdev->name, conn->userid);
}

/* (state, event) -> action table for the connection statemachine. */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};

static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);

/*
 * Actions for interface - statemachine.
 */

/**
 * dev_action_start
 * @fi: An instance of an interface statemachine.
 * @event: The event, just happened.
 * @arg: Generic pointer, casted from struct net_device * upon call.
 *
 * Startup connection by sending CONN_EVENT_START to it.
*/ static void dev_action_start(fsm_instance *fi, int event, void *arg) { struct net_device *dev = arg; struct netiucv_priv *privptr = netdev_priv(dev); IUCV_DBF_TEXT(trace, 3, __func__); fsm_newstate(fi, DEV_STATE_STARTWAIT); fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); } /** * Shutdown connection by sending CONN_EVENT_STOP to it. * * @param fi An instance of an interface statemachine. * @param event The event, just happened. * @param arg Generic pointer, casted from struct net_device * upon call. */ static void dev_action_stop(fsm_instance *fi, int event, void *arg) { struct net_device *dev = arg; struct netiucv_priv *privptr = netdev_priv(dev); struct iucv_event ev; IUCV_DBF_TEXT(trace, 3, __func__); ev.conn = privptr->conn; fsm_newstate(fi, DEV_STATE_STOPWAIT); fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev); } /** * Called from connection statemachine * when a connection is up and running. * * @param fi An instance of an interface statemachine. * @param event The event, just happened. * @param arg Generic pointer, casted from struct net_device * upon call. */ static void dev_action_connup(fsm_instance *fi, int event, void *arg) { struct net_device *dev = arg; struct netiucv_priv *privptr = netdev_priv(dev); IUCV_DBF_TEXT(trace, 3, __func__); switch (fsm_getstate(fi)) { case DEV_STATE_STARTWAIT: fsm_newstate(fi, DEV_STATE_RUNNING); dev_info(privptr->dev, "The IUCV device has been connected" " successfully to %s\n", privptr->conn->userid); IUCV_DBF_TEXT(setup, 3, "connection is up and running\n"); break; case DEV_STATE_STOPWAIT: IUCV_DBF_TEXT(data, 2, "dev_action_connup: in DEV_STATE_STOPWAIT\n"); break; } } /** * Called from connection statemachine * when a connection has been shutdown. * * @param fi An instance of an interface statemachine. * @param event The event, just happened. * @param arg Generic pointer, casted from struct net_device * upon call. 
*/
static void dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		/* Connection dropped while running: wait for a restart. */
		fsm_newstate(fi, DEV_STATE_STARTWAIT);
		break;
	case DEV_STATE_STOPWAIT:
		fsm_newstate(fi, DEV_STATE_STOPPED);
		IUCV_DBF_TEXT(setup, 3, "connection is down\n");
		break;
	}
}

/* (state, event) -> action table for the interface statemachine. */
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,   DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,  DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,  DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT, DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT, DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,   DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,   DEV_EVENT_CONUP,   netiucv_action_nop  },
};

static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);

/**
 * Transmit a packet.
 * This is a helper function for netiucv_tx().
 *
 * @param conn Connection to be used for sending.
 * @param skb Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by netiucv_tx().
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
 */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		/* A send is in flight: park the skb on the collect queue. */
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			atomic_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
			rc = 0;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/*
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				    NETIUCV_HDRLEN)) >> 31;
		int copied = 0;
		if (hi || (skb_tailroom(skb) < 2)) {
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN,
					 GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				rc = -ENOMEM;
				return rc;
			} else {
				skb_reserve(nskb, NETIUCV_HDRLEN);
				memcpy(skb_put(nskb, skb->len),
				       skb->data, skb->len);
			}
			copied = 1;
		}
		/*
		 * skb now is below 2G and has enough room. Add headers.
		 */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		header.next = 0;	/* terminating header */
		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = current_kernel_time();

		msg.tag = 1;	/* tag==1: single-skb send (see txdone) */
		msg.class = 0;
		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			if (privptr)
				privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/*
				 * Remove our headers. They get added
				 * again on retransmit.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			if (copied)
				dev_kfree_skb(skb);
			atomic_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}

/*
 * Interface API for upper network layers
 */

/**
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 * @param dev Pointer to interface struct.
 *
 * @return 0 on success, -ERRNO on failure. (Never fails.)
*/ static int netiucv_open(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); fsm_event(priv->fsm, DEV_EVENT_START, dev); return 0; } /** * Close an interface. * Called from generic network layer when ifconfig down is run. * * @param dev Pointer to interface struct. * * @return 0 on success, -ERRNO on failure. (Never fails.) */ static int netiucv_close(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); fsm_event(priv->fsm, DEV_EVENT_STOP, dev); return 0; } static int netiucv_pm_prepare(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __func__); return 0; } static void netiucv_pm_complete(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __func__); return; } /** * netiucv_pm_freeze() - Freeze PM callback * @dev: netiucv device * * close open netiucv interfaces */ static int netiucv_pm_freeze(struct device *dev) { struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = NULL; int rc = 0; IUCV_DBF_TEXT(trace, 3, __func__); if (priv && priv->conn) ndev = priv->conn->netdev; if (!ndev) goto out; netif_device_detach(ndev); priv->pm_state = fsm_getstate(priv->fsm); rc = netiucv_close(ndev); out: return rc; } /** * netiucv_pm_restore_thaw() - Thaw and restore PM callback * @dev: netiucv device * * re-open netiucv interfaces closed during freeze */ static int netiucv_pm_restore_thaw(struct device *dev) { struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = NULL; int rc = 0; IUCV_DBF_TEXT(trace, 3, __func__); if (priv && priv->conn) ndev = priv->conn->netdev; if (!ndev) goto out; switch (priv->pm_state) { case DEV_STATE_RUNNING: case DEV_STATE_STARTWAIT: rc = netiucv_open(ndev); break; default: break; } netif_device_attach(ndev); out: return rc; } /** * Start transmission of a packet. * Called from generic network device layer. * * @param skb Pointer to buffer containing the packet. * @param dev Pointer to interface struct. * * @return 0 if packet consumed, !0 if packet rejected. 
* Note: If we return !0, then the packet is free'd by * the generic network layer. */ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) { struct netiucv_priv *privptr = netdev_priv(dev); int rc; IUCV_DBF_TEXT(trace, 4, __func__); /** * Some sanity checks ... */ if (skb == NULL) { IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); privptr->stats.tx_dropped++; return NETDEV_TX_OK; } if (skb_headroom(skb) < NETIUCV_HDRLEN) { IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); dev_kfree_skb(skb); privptr->stats.tx_dropped++; return NETDEV_TX_OK; } /** * If connection is not running, try to restart it * and throw away packet. */ if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { dev_kfree_skb(skb); privptr->stats.tx_dropped++; privptr->stats.tx_errors++; privptr->stats.tx_carrier_errors++; return NETDEV_TX_OK; } if (netiucv_test_and_set_busy(dev)) { IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n"); return NETDEV_TX_BUSY; } dev->trans_start = jiffies; rc = netiucv_transmit_skb(privptr->conn, skb); netiucv_clear_busy(dev); return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK; } /** * netiucv_stats * @dev: Pointer to interface struct. * * Returns interface statistics of a device. * * Returns pointer to stats struct of this interface. */ static struct net_device_stats *netiucv_stats (struct net_device * dev) { struct netiucv_priv *priv = netdev_priv(dev); IUCV_DBF_TEXT(trace, 5, __func__); return &priv->stats; } /** * netiucv_change_mtu * @dev: Pointer to interface struct. * @new_mtu: The new MTU to use for this interface. * * Sets MTU of an interface. * * Returns 0 on success, -EINVAL if MTU is out of valid range. * (valid range is 576 .. NETIUCV_MTU_MAX). 
*/
static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}

/*
 * attributes in sysfs
 */

/* sysfs "user" (read): show the peer z/VM userid. */
static ssize_t user_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}

/*
 * sysfs "user" (write): set the peer z/VM userid.  The input is
 * upper-cased and blank-padded to 8 characters; only alphanumerics and
 * '$' are accepted.  Rejected with -EPERM while the interface is active
 * and with -EEXIST if another connection already uses the userid.
 */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char *p;
	char *tmp;
	char username[9];
	int i;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count > 9) {
		IUCV_DBF_TEXT_(setup, 2,
			       "%d is length of username\n", (int) count);
		return -EINVAL;
	}

	tmp = strsep((char **) &buf, "\n");
	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
		if (isalnum(*p) || (*p == '$')) {
			username[i]= toupper(*p);
			continue;
		}
		if (*p == '\n') {
			/* trailing lf, grr */
			break;
		}
		IUCV_DBF_TEXT_(setup, 2,
			       "username: invalid character %c\n", *p);
		return -EINVAL;
	}
	/* Blank-pad to the fixed 8-character z/VM userid format. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
				"to %s already exists\n", username);
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);

/* sysfs "buffer" (read): show the connection's max buffer size. */
static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}

/*
 * sysfs "buffer" (write): set the connection's max buffer size, with
 * bounds checks against NETIUCV_BUFSIZE_MAX and the current MTU.  When
 * the interface is down the MTU is adjusted to fit the new buffer.
 */
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char *e;
	int bs1;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count >= 39)
		return -EINVAL;

	bs1 = simple_strtoul(buf, &e, 0);

	if (e && (!isspace(*e))) {
		IUCV_DBF_TEXT_(setup, 2,
			       "buffer_write: invalid char %c\n", *e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;
}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);

/* sysfs stats: current device FSM state name. */
static ssize_t dev_fsm_show (struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}

static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);

/* sysfs stats: current connection FSM state name. */
static ssize_t conn_fsm_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}

static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);

/* sysfs stats: high-water mark of collected tx bytes; write resets it. */
static ssize_t maxmulti_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}

static ssize_t maxmulti_write (struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxmulti = 0;
	return count;
}

static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);

/* sysfs stats: max skbs chained into one send; write resets it. */
static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}

static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxcqueue = 0;
	return count;
}

static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);

/* sysfs stats: single-skb send count; write resets it. */
static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}

static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.doios_single = 0;
	return count;
}

static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);

/* sysfs stats: multi-skb send count; write resets it. */
static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}

static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	priv->conn->prof.doios_multi = 0;
	return count;
}

static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

/* sysfs stats: total bytes handed to IUCV; write resets it. */
static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

/* sysfs stats: max tx I/O time; write resets it. */
static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t txtime_write (struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

/* sysfs stats: currently pending sends; write resets the counter. */
static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

/* sysfs stats: high-water mark of pending sends; write resets it. */
static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);

/* Main attribute group: configuration knobs. */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

/* "stats" attribute subgroup: profiling counters. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name = "stats",
	.attrs = netiucv_stat_attrs,
};

/*
 * Create both sysfs attribute groups on @dev.  If the stats group fails,
 * the main group is removed again so nothing is left half-created.
 */
static int netiucv_add_files(struct device *dev)
{
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);
	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
	if (ret)
		return ret;
	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
	if (ret)
		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
	return ret;
}

/* Remove both sysfs attribute groups from @dev. */
static void netiucv_remove_files(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}

/*
 * Allocate and register the struct device representing @ndev on the
 * IUCV bus, then create its sysfs files.  On success priv->dev points
 * to the new device and its drvdata points back to priv.
 */
static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (dev) {
		dev_set_name(dev, "net%s", ndev->name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		/*
		 * The release function could be called after the
		 * module has been unloaded. It's _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitime ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
		dev->driver = &netiucv_driver;
	} else
		return -ENOMEM;

	ret = device_register(dev);
	if (ret) {
		/* device_register failed: put_device triggers the release. */
		put_device(dev);
		return ret;
	}
	ret = netiucv_add_files(dev);
	if (ret)
		goto out_unreg;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);
	return 0;

out_unreg:
	device_unregister(dev);
	return ret;
}

/* Tear down the sysfs files and unregister the bus device. */
static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_remove_files(dev);
	device_unregister(dev);
}

/**
 * Allocate and initialize a new connection structure.
* Add it to the list of netiucv connections; */ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, char *username) { struct iucv_connection *conn; conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) goto out; skb_queue_head_init(&conn->collect_queue); skb_queue_head_init(&conn->commit_queue); spin_lock_init(&conn->collect_lock); conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; conn->netdev = dev; conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); if (!conn->rx_buff) goto out_conn; conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); if (!conn->tx_buff) goto out_rx; conn->fsm = init_fsm("netiucvconn", conn_state_names, conn_event_names, NR_CONN_STATES, NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, GFP_KERNEL); if (!conn->fsm) goto out_tx; fsm_settimer(conn->fsm, &conn->timer); fsm_newstate(conn->fsm, CONN_STATE_INVALID); if (username) { memcpy(conn->userid, username, 9); fsm_newstate(conn->fsm, CONN_STATE_STOPPED); } write_lock_bh(&iucv_connection_rwlock); list_add_tail(&conn->list, &iucv_connection_list); write_unlock_bh(&iucv_connection_rwlock); return conn; out_tx: kfree_skb(conn->tx_buff); out_rx: kfree_skb(conn->rx_buff); out_conn: kfree(conn); out: return NULL; } /** * Release a connection structure and remove it from the * list of netiucv connections. */ static void netiucv_remove_connection(struct iucv_connection *conn) { IUCV_DBF_TEXT(trace, 3, __func__); write_lock_bh(&iucv_connection_rwlock); list_del_init(&conn->list); write_unlock_bh(&iucv_connection_rwlock); fsm_deltimer(&conn->timer); netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { iucv_path_sever(conn->path, iucvMagic); kfree(conn->path); conn->path = NULL; } netiucv_purge_skb_queue(&conn->commit_queue); kfree_fsm(conn->fsm); kfree_skb(conn->rx_buff); kfree_skb(conn->tx_buff); } /** * Release everything of a net device. 
*/ static void netiucv_free_netdevice(struct net_device *dev) { struct netiucv_priv *privptr = netdev_priv(dev); IUCV_DBF_TEXT(trace, 3, __func__); if (!dev) return; if (privptr) { if (privptr->conn) netiucv_remove_connection(privptr->conn); if (privptr->fsm) kfree_fsm(privptr->fsm); privptr->conn = NULL; privptr->fsm = NULL; /* privptr gets freed by free_netdev() */ } free_netdev(dev); } /** * Initialize a net device. (Called from kernel in alloc_netdev()) */ static const struct net_device_ops netiucv_netdev_ops = { .ndo_open = netiucv_open, .ndo_stop = netiucv_close, .ndo_get_stats = netiucv_stats, .ndo_start_xmit = netiucv_tx, .ndo_change_mtu = netiucv_change_mtu, }; static void netiucv_setup_netdevice(struct net_device *dev) { dev->mtu = NETIUCV_MTU_DEFAULT; dev->destructor = netiucv_free_netdevice; dev->hard_header_len = NETIUCV_HDRLEN; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->netdev_ops = &netiucv_netdev_ops; } /** * Allocate and initialize everything of a net device. 
*/ static struct net_device *netiucv_init_netdevice(char *username) { struct netiucv_priv *privptr; struct net_device *dev; dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d", netiucv_setup_netdevice); if (!dev) return NULL; privptr = netdev_priv(dev); privptr->fsm = init_fsm("netiucvdev", dev_state_names, dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, dev_fsm, DEV_FSM_LEN, GFP_KERNEL); if (!privptr->fsm) goto out_netdev; privptr->conn = netiucv_new_connection(dev, username); if (!privptr->conn) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); goto out_fsm; } fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); return dev; out_fsm: kfree_fsm(privptr->fsm); out_netdev: free_netdev(dev); return NULL; } static ssize_t conn_write(struct device_driver *drv, const char *buf, size_t count) { const char *p; char username[9]; int i, rc; struct net_device *dev; struct netiucv_priv *priv; struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __func__); if (count>9) { IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); return -EINVAL; } for (i = 0, p = buf; i < 8 && *p; i++, p++) { if (isalnum(*p) || *p == '$') { username[i] = toupper(*p); continue; } if (*p == '\n') /* trailing lf, grr */ break; IUCV_DBF_TEXT_(setup, 2, "conn_write: invalid character %c\n", *p); return -EINVAL; } while (i < 8) username[i++] = ' '; username[8] = '\0'; read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { if (!strncmp(username, cp->userid, 9)) { read_unlock_bh(&iucv_connection_rwlock); IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " "to %s already exists\n", username); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); dev = netiucv_init_netdevice(username); if (!dev) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); return -ENODEV; } rc = netiucv_register_device(dev); if (rc) { IUCV_DBF_TEXT_(setup, 2, "ret %d from netiucv_register_device\n", rc); goto out_free_ndev; } /* sysfs magic */ priv = netdev_priv(dev); 
SET_NETDEV_DEV(dev, priv->dev); rc = register_netdev(dev); if (rc) goto out_unreg; dev_info(priv->dev, "The IUCV interface to %s has been" " established successfully\n", netiucv_printname(username)); return count; out_unreg: netiucv_unregister_device(priv->dev); out_free_ndev: netiucv_free_netdevice(dev); return rc; } static DRIVER_ATTR(connection, 0200, NULL, conn_write); static ssize_t remove_write (struct device_driver *drv, const char *buf, size_t count) { struct iucv_connection *cp; struct net_device *ndev; struct netiucv_priv *priv; struct device *dev; char name[IFNAMSIZ]; const char *p; int i; IUCV_DBF_TEXT(trace, 3, __func__); if (count >= IFNAMSIZ) count = IFNAMSIZ - 1; for (i = 0, p = buf; i < count && *p; i++, p++) { if (*p == '\n' || *p == ' ') /* trailing lf, grr */ break; name[i] = *p; } name[i] = '\0'; read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { ndev = cp->netdev; priv = netdev_priv(ndev); dev = priv->dev; if (strncmp(name, ndev->name, count)) continue; read_unlock_bh(&iucv_connection_rwlock); if (ndev->flags & (IFF_UP | IFF_RUNNING)) { dev_warn(dev, "The IUCV device is connected" " to %s and cannot be removed\n", priv->conn->userid); IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); return -EPERM; } unregister_netdev(ndev); netiucv_unregister_device(dev); return count; } read_unlock_bh(&iucv_connection_rwlock); IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); return -EINVAL; } static DRIVER_ATTR(remove, 0200, NULL, remove_write); static struct attribute * netiucv_drv_attrs[] = { &driver_attr_connection.attr, &driver_attr_remove.attr, NULL, }; static struct attribute_group netiucv_drv_attr_group = { .attrs = netiucv_drv_attrs, }; static const struct attribute_group *netiucv_drv_attr_groups[] = { &netiucv_drv_attr_group, NULL, }; static void netiucv_banner(void) { pr_info("driver initialized\n"); } static void __exit netiucv_exit(void) { struct iucv_connection *cp; struct net_device 
*ndev; struct netiucv_priv *priv; struct device *dev; IUCV_DBF_TEXT(trace, 3, __func__); while (!list_empty(&iucv_connection_list)) { cp = list_entry(iucv_connection_list.next, struct iucv_connection, list); ndev = cp->netdev; priv = netdev_priv(ndev); dev = priv->dev; unregister_netdev(ndev); netiucv_unregister_device(dev); } device_unregister(netiucv_dev); driver_unregister(&netiucv_driver); iucv_unregister(&netiucv_handler, 1); iucv_unregister_dbf_views(); pr_info("driver unloaded\n"); return; } static int __init netiucv_init(void) { int rc; rc = iucv_register_dbf_views(); if (rc) goto out; rc = iucv_register(&netiucv_handler, 1); if (rc) goto out_dbf; IUCV_DBF_TEXT(trace, 3, __func__); netiucv_driver.groups = netiucv_drv_attr_groups; rc = driver_register(&netiucv_driver); if (rc) { IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); goto out_iucv; } /* establish dummy device */ netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!netiucv_dev) { rc = -ENOMEM; goto out_driver; } dev_set_name(netiucv_dev, "netiucv"); netiucv_dev->bus = &iucv_bus; netiucv_dev->parent = iucv_root; netiucv_dev->release = (void (*)(struct device *))kfree; netiucv_dev->driver = &netiucv_driver; rc = device_register(netiucv_dev); if (rc) { put_device(netiucv_dev); goto out_driver; } netiucv_banner(); return rc; out_driver: driver_unregister(&netiucv_driver); out_iucv: iucv_unregister(&netiucv_handler, 1); out_dbf: iucv_unregister_dbf_views(); out: return rc; } module_init(netiucv_init); module_exit(netiucv_exit); MODULE_LICENSE("GPL");
gpl-2.0
Coolexe/shooteru-ics-crc-3.0.16-e733189
drivers/s390/net/netiucv.c
2801
58371
/* * IUCV network driver * * Copyright IBM Corp. 2001, 2009 * * Author(s): * Original netiucv driver: * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) * Sysfs integration and all bugs therein: * Cornelia Huck (cornelia.huck@de.ibm.com) * PM functions: * Ursula Braun (ursula.braun@de.ibm.com) * * Documentation used: * the source of the original IUCV driver by: * Stefan Hegewald <hegewald@de.ibm.com> * Hartmut Penner <hpenner@de.ibm.com> * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #define KMSG_COMPONENT "netiucv" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #undef DEBUG #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/bitops.h> #include <linux/signal.h> #include <linux/string.h> #include <linux/device.h> #include <linux/ip.h> #include <linux/if_arp.h> #include <linux/tcp.h> #include <linux/skbuff.h> #include <linux/ctype.h> #include <net/dst.h> #include <asm/io.h> #include <asm/uaccess.h> #include <net/iucv/iucv.h> #include "fsm.h" MODULE_AUTHOR ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); /** * Debug Facility stuff */ #define IUCV_DBF_SETUP_NAME "iucv_setup" #define IUCV_DBF_SETUP_LEN 32 #define IUCV_DBF_SETUP_PAGES 2 #define IUCV_DBF_SETUP_NR_AREAS 1 #define IUCV_DBF_SETUP_LEVEL 3 #define IUCV_DBF_DATA_NAME "iucv_data" #define IUCV_DBF_DATA_LEN 128 #define IUCV_DBF_DATA_PAGES 2 #define IUCV_DBF_DATA_NR_AREAS 1 #define IUCV_DBF_DATA_LEVEL 2 #define IUCV_DBF_TRACE_NAME "iucv_trace" #define IUCV_DBF_TRACE_LEN 16 #define IUCV_DBF_TRACE_PAGES 4 #define IUCV_DBF_TRACE_NR_AREAS 1 #define IUCV_DBF_TRACE_LEVEL 3 #define IUCV_DBF_TEXT(name,level,text) \ do { \ debug_text_event(iucv_dbf_##name,level,text); \ } while (0) #define IUCV_DBF_HEX(name,level,addr,len) \ do { \ debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ } while (0) DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); /* Allow to sort out low debug levels early to avoid wasted sprints */ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level) { return (level <= dbf_grp->level); } #define IUCV_DBF_TEXT_(name, level, text...) 
\ do { \ if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ char* __buf = get_cpu_var(iucv_dbf_txt_buf); \ sprintf(__buf, text); \ debug_text_event(iucv_dbf_##name, level, __buf); \ put_cpu_var(iucv_dbf_txt_buf); \ } \ } while (0) #define IUCV_DBF_SPRINTF(name,level,text...) \ do { \ debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ debug_sprintf_event(iucv_dbf_trace, level, text ); \ } while (0) /** * some more debug stuff */ #define IUCV_HEXDUMP16(importance,header,ptr) \ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ *(((char*)ptr)+12),*(((char*)ptr)+13), \ *(((char*)ptr)+14),*(((char*)ptr)+15)); \ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ *(((char*)ptr)+16),*(((char*)ptr)+17), \ *(((char*)ptr)+18),*(((char*)ptr)+19), \ *(((char*)ptr)+20),*(((char*)ptr)+21), \ *(((char*)ptr)+22),*(((char*)ptr)+23), \ *(((char*)ptr)+24),*(((char*)ptr)+25), \ *(((char*)ptr)+26),*(((char*)ptr)+27), \ *(((char*)ptr)+28),*(((char*)ptr)+29), \ *(((char*)ptr)+30),*(((char*)ptr)+31)); #define PRINTK_HEADER " iucv: " /* for debugging */ /* dummy device to make sure netiucv_pm functions are called */ static struct device *netiucv_dev; static int netiucv_pm_prepare(struct device *); static void netiucv_pm_complete(struct device *); static int netiucv_pm_freeze(struct device *); static int netiucv_pm_restore_thaw(struct device *); static const struct dev_pm_ops netiucv_pm_ops = { .prepare = netiucv_pm_prepare, .complete = netiucv_pm_complete, .freeze = netiucv_pm_freeze, .thaw = netiucv_pm_restore_thaw, .restore = netiucv_pm_restore_thaw, }; static struct device_driver netiucv_driver = { .owner = THIS_MODULE, .name = 
"netiucv", .bus = &iucv_bus, .pm = &netiucv_pm_ops, }; static int netiucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]); static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); static struct iucv_handler netiucv_handler = { .path_pending = netiucv_callback_connreq, .path_complete = netiucv_callback_connack, .path_severed = netiucv_callback_connrej, .path_quiesced = netiucv_callback_connsusp, .path_resumed = netiucv_callback_connres, .message_pending = netiucv_callback_rx, .message_complete = netiucv_callback_txdone }; /** * Per connection profiling data */ struct connection_profile { unsigned long maxmulti; unsigned long maxcqueue; unsigned long doios_single; unsigned long doios_multi; unsigned long txlen; unsigned long tx_time; struct timespec send_stamp; unsigned long tx_pending; unsigned long tx_max_pending; }; /** * Representation of one iucv connection */ struct iucv_connection { struct list_head list; struct iucv_path *path; struct sk_buff *rx_buff; struct sk_buff *tx_buff; struct sk_buff_head collect_queue; struct sk_buff_head commit_queue; spinlock_t collect_lock; int collect_len; int max_buffsize; fsm_timer timer; fsm_instance *fsm; struct net_device *netdev; struct connection_profile prof; char userid[9]; }; /** * Linked list of all connection structs. */ static LIST_HEAD(iucv_connection_list); static DEFINE_RWLOCK(iucv_connection_rwlock); /** * Representation of event-data for the * connection state machine. 
*/ struct iucv_event { struct iucv_connection *conn; void *data; }; /** * Private part of the network device structure */ struct netiucv_priv { struct net_device_stats stats; unsigned long tbusy; fsm_instance *fsm; struct iucv_connection *conn; struct device *dev; int pm_state; }; /** * Link level header for a packet. */ struct ll_header { u16 next; }; #define NETIUCV_HDRLEN (sizeof(struct ll_header)) #define NETIUCV_BUFSIZE_MAX 32768 #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) #define NETIUCV_MTU_DEFAULT 9216 #define NETIUCV_QUEUELEN_DEFAULT 50 #define NETIUCV_TIMEOUT_5SEC 5000 /** * Compatibility macros for busy handling * of network devices. */ static inline void netiucv_clear_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); clear_bit(0, &priv->tbusy); netif_wake_queue(dev); } static inline int netiucv_test_and_set_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); netif_stop_queue(dev); return test_and_set_bit(0, &priv->tbusy); } static u8 iucvMagic[16] = { 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 }; /** * Convert an iucv userId to its printable * form (strip whitespace at end). * * @param An iucv userId * * @returns The printable string (static data!!) */ static char *netiucv_printname(char *name) { static char tmp[9]; char *p = tmp; memcpy(tmp, name, 8); tmp[8] = '\0'; while (*p && (!isspace(*p))) p++; *p = '\0'; return tmp; } /** * States of the interface statemachine. */ enum dev_states { DEV_STATE_STOPPED, DEV_STATE_STARTWAIT, DEV_STATE_STOPWAIT, DEV_STATE_RUNNING, /** * MUST be always the last element!! */ NR_DEV_STATES }; static const char *dev_state_names[] = { "Stopped", "StartWait", "StopWait", "Running", }; /** * Events of the interface statemachine. 
*/ enum dev_events { DEV_EVENT_START, DEV_EVENT_STOP, DEV_EVENT_CONUP, DEV_EVENT_CONDOWN, /** * MUST be always the last element!! */ NR_DEV_EVENTS }; static const char *dev_event_names[] = { "Start", "Stop", "Connection up", "Connection down", }; /** * Events of the connection statemachine */ enum conn_events { /** * Events, representing callbacks from * lowlevel iucv layer) */ CONN_EVENT_CONN_REQ, CONN_EVENT_CONN_ACK, CONN_EVENT_CONN_REJ, CONN_EVENT_CONN_SUS, CONN_EVENT_CONN_RES, CONN_EVENT_RX, CONN_EVENT_TXDONE, /** * Events, representing errors return codes from * calls to lowlevel iucv layer */ /** * Event, representing timer expiry. */ CONN_EVENT_TIMER, /** * Events, representing commands from upper levels. */ CONN_EVENT_START, CONN_EVENT_STOP, /** * MUST be always the last element!! */ NR_CONN_EVENTS, }; static const char *conn_event_names[] = { "Remote connection request", "Remote connection acknowledge", "Remote connection reject", "Connection suspended", "Connection resumed", "Data received", "Data sent", "Timer", "Start", "Stop", }; /** * States of the connection statemachine. */ enum conn_states { /** * Connection not assigned to any device, * initial state, invalid */ CONN_STATE_INVALID, /** * Userid assigned but not operating */ CONN_STATE_STOPPED, /** * Connection registered, * no connection request sent yet, * no connection request received */ CONN_STATE_STARTWAIT, /** * Connection registered and connection request sent, * no acknowledge and no connection request received yet. */ CONN_STATE_SETUPWAIT, /** * Connection up and running idle */ CONN_STATE_IDLE, /** * Data sent, awaiting CONN_EVENT_TXDONE */ CONN_STATE_TX, /** * Error during registration. */ CONN_STATE_REGERR, /** * Error during registration. */ CONN_STATE_CONNERR, /** * MUST be always the last element!! 
*/ NR_CONN_STATES, }; static const char *conn_state_names[] = { "Invalid", "Stopped", "StartWait", "SetupWait", "Idle", "TX", "Terminating", "Registration error", "Connect error", }; /** * Debug Facility Stuff */ static debug_info_t *iucv_dbf_setup = NULL; static debug_info_t *iucv_dbf_data = NULL; static debug_info_t *iucv_dbf_trace = NULL; DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); static void iucv_unregister_dbf_views(void) { if (iucv_dbf_setup) debug_unregister(iucv_dbf_setup); if (iucv_dbf_data) debug_unregister(iucv_dbf_data); if (iucv_dbf_trace) debug_unregister(iucv_dbf_trace); } static int iucv_register_dbf_views(void) { iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, IUCV_DBF_SETUP_PAGES, IUCV_DBF_SETUP_NR_AREAS, IUCV_DBF_SETUP_LEN); iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME, IUCV_DBF_DATA_PAGES, IUCV_DBF_DATA_NR_AREAS, IUCV_DBF_DATA_LEN); iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME, IUCV_DBF_TRACE_PAGES, IUCV_DBF_TRACE_NR_AREAS, IUCV_DBF_TRACE_LEN); if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) || (iucv_dbf_trace == NULL)) { iucv_unregister_dbf_views(); return -ENOMEM; } debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view); debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL); debug_register_view(iucv_dbf_data, &debug_hex_ascii_view); debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL); debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view); debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL); return 0; } /* * Callback-wrappers, called from lowlevel iucv layer. 
*/ static void netiucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) { struct iucv_connection *conn = path->private; struct iucv_event ev; ev.conn = conn; ev.data = msg; fsm_event(conn->fsm, CONN_EVENT_RX, &ev); } static void netiucv_callback_txdone(struct iucv_path *path, struct iucv_message *msg) { struct iucv_connection *conn = path->private; struct iucv_event ev; ev.conn = conn; ev.data = msg; fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); } static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) { struct iucv_connection *conn = path->private; fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn); } static int netiucv_callback_connreq(struct iucv_path *path, u8 ipvmid[8], u8 ipuser[16]) { struct iucv_connection *conn = path->private; struct iucv_event ev; int rc; if (memcmp(iucvMagic, ipuser, 16)) /* ipuser must match iucvMagic. */ return -EINVAL; rc = -EINVAL; read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(conn, &iucv_connection_list, list) { if (strncmp(ipvmid, conn->userid, 8)) continue; /* Found a matching connection for this path. 
*/ conn->path = path; ev.conn = conn; ev.data = path; fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); rc = 0; } read_unlock_bh(&iucv_connection_rwlock); return rc; } static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) { struct iucv_connection *conn = path->private; fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn); } static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16]) { struct iucv_connection *conn = path->private; fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn); } static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16]) { struct iucv_connection *conn = path->private; fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); } /** * NOP action for statemachines */ static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) { } /* * Actions of the connection statemachine */ /** * netiucv_unpack_skb * @conn: The connection where this skb has been received. * @pskb: The received skb. * * Unpack a just received skb and hand it over to upper layers. * Helper function for conn_action_rx. 
*/ static void netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) { struct net_device *dev = conn->netdev; struct netiucv_priv *privptr = netdev_priv(dev); u16 offset = 0; skb_put(pskb, NETIUCV_HDRLEN); pskb->dev = dev; pskb->ip_summed = CHECKSUM_NONE; pskb->protocol = ntohs(ETH_P_IP); while (1) { struct sk_buff *skb; struct ll_header *header = (struct ll_header *) pskb->data; if (!header->next) break; skb_pull(pskb, NETIUCV_HDRLEN); header->next -= offset; offset += header->next; header->next -= NETIUCV_HDRLEN; if (skb_tailroom(pskb) < header->next) { IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", header->next, skb_tailroom(pskb)); return; } skb_put(pskb, header->next); skb_reset_mac_header(pskb); skb = dev_alloc_skb(pskb->len); if (!skb) { IUCV_DBF_TEXT(data, 2, "Out of memory in netiucv_unpack_skb\n"); privptr->stats.rx_dropped++; return; } skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), pskb->len); skb_reset_mac_header(skb); skb->dev = pskb->dev; skb->protocol = pskb->protocol; pskb->ip_summed = CHECKSUM_UNNECESSARY; privptr->stats.rx_packets++; privptr->stats.rx_bytes += skb->len; /* * Since receiving is always initiated from a tasklet (in iucv.c), * we must use netif_rx_ni() instead of netif_rx() */ netif_rx_ni(skb); skb_pull(pskb, header->next); skb_put(pskb, NETIUCV_HDRLEN); } } static void conn_action_rx(fsm_instance *fi, int event, void *arg) { struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; struct iucv_message *msg = ev->data; struct netiucv_priv *privptr = netdev_priv(conn->netdev); int rc; IUCV_DBF_TEXT(trace, 4, __func__); if (!conn->netdev) { iucv_message_reject(conn->path, msg); IUCV_DBF_TEXT(data, 2, "Received data for unlinked connection\n"); return; } if (msg->length > conn->max_buffsize) { iucv_message_reject(conn->path, msg); privptr->stats.rx_dropped++; IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", msg->length, conn->max_buffsize); return; } conn->rx_buff->data = 
conn->rx_buff->head; skb_reset_tail_pointer(conn->rx_buff); conn->rx_buff->len = 0; rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, msg->length, NULL); if (rc || msg->length < 5) { privptr->stats.rx_errors++; IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); return; } netiucv_unpack_skb(conn, conn->rx_buff); } static void conn_action_txdone(fsm_instance *fi, int event, void *arg) { struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; struct iucv_message *msg = ev->data; struct iucv_message txmsg; struct netiucv_priv *privptr = NULL; u32 single_flag = msg->tag; u32 txbytes = 0; u32 txpackets = 0; u32 stat_maxcq = 0; struct sk_buff *skb; unsigned long saveflags; struct ll_header header; int rc; IUCV_DBF_TEXT(trace, 4, __func__); if (conn && conn->netdev) privptr = netdev_priv(conn->netdev); conn->prof.tx_pending--; if (single_flag) { if ((skb = skb_dequeue(&conn->commit_queue))) { atomic_dec(&skb->users); if (privptr) { privptr->stats.tx_packets++; privptr->stats.tx_bytes += (skb->len - NETIUCV_HDRLEN - NETIUCV_HDRLEN); } dev_kfree_skb_any(skb); } } conn->tx_buff->data = conn->tx_buff->head; skb_reset_tail_pointer(conn->tx_buff); conn->tx_buff->len = 0; spin_lock_irqsave(&conn->collect_lock, saveflags); while ((skb = skb_dequeue(&conn->collect_queue))) { header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); skb_copy_from_linear_data(skb, skb_put(conn->tx_buff, skb->len), skb->len); txbytes += skb->len; txpackets++; stat_maxcq++; atomic_dec(&skb->users); dev_kfree_skb_any(skb); } if (conn->collect_len > conn->prof.maxmulti) conn->prof.maxmulti = conn->collect_len; conn->collect_len = 0; spin_unlock_irqrestore(&conn->collect_lock, saveflags); if (conn->tx_buff->len == 0) { fsm_newstate(fi, CONN_STATE_IDLE); return; } header.next = 0; memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); conn->prof.send_stamp = 
current_kernel_time(); txmsg.class = 0; txmsg.tag = 0; rc = iucv_message_send(conn->path, &txmsg, 0, 0, conn->tx_buff->data, conn->tx_buff->len); conn->prof.doios_multi++; conn->prof.txlen += conn->tx_buff->len; conn->prof.tx_pending++; if (conn->prof.tx_pending > conn->prof.tx_max_pending) conn->prof.tx_max_pending = conn->prof.tx_pending; if (rc) { conn->prof.tx_pending--; fsm_newstate(fi, CONN_STATE_IDLE); if (privptr) privptr->stats.tx_errors += txpackets; IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); } else { if (privptr) { privptr->stats.tx_packets += txpackets; privptr->stats.tx_bytes += txbytes; } if (stat_maxcq > conn->prof.maxcqueue) conn->prof.maxcqueue = stat_maxcq; } } static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) { struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; struct iucv_path *path = ev->data; struct net_device *netdev = conn->netdev; struct netiucv_priv *privptr = netdev_priv(netdev); int rc; IUCV_DBF_TEXT(trace, 3, __func__); conn->path = path; path->msglim = NETIUCV_QUEUELEN_DEFAULT; path->flags = 0; rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); if (rc) { IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); return; } fsm_newstate(fi, CONN_STATE_IDLE); netdev->tx_queue_len = conn->path->msglim; fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); } static void conn_action_connreject(fsm_instance *fi, int event, void *arg) { struct iucv_event *ev = arg; struct iucv_path *path = ev->data; IUCV_DBF_TEXT(trace, 3, __func__); iucv_path_sever(path, NULL); } static void conn_action_connack(fsm_instance *fi, int event, void *arg) { struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; struct netiucv_priv *privptr = netdev_priv(netdev); IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); fsm_newstate(fi, CONN_STATE_IDLE); netdev->tx_queue_len = conn->path->msglim; fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); } static void 
conn_action_conntimsev(fsm_instance *fi, int event, void *arg) { struct iucv_connection *conn = arg; IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); iucv_path_sever(conn->path, NULL); fsm_newstate(fi, CONN_STATE_STARTWAIT); } static void conn_action_connsever(fsm_instance *fi, int event, void *arg) { struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; struct netiucv_priv *privptr = netdev_priv(netdev); IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); iucv_path_sever(conn->path, NULL); dev_info(privptr->dev, "The peer interface of the IUCV device" " has closed the connection\n"); IUCV_DBF_TEXT(data, 2, "conn_action_connsever: Remote dropped connection\n"); fsm_newstate(fi, CONN_STATE_STARTWAIT); fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); } static void conn_action_start(fsm_instance *fi, int event, void *arg) { struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; struct netiucv_priv *privptr = netdev_priv(netdev); int rc; IUCV_DBF_TEXT(trace, 3, __func__); fsm_newstate(fi, CONN_STATE_STARTWAIT); IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", netdev->name, conn->userid); /* * We must set the state before calling iucv_connect because the * callback handler could be called at any point after the connection * request is sent */ fsm_newstate(fi, CONN_STATE_SETUPWAIT); conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, NULL, iucvMagic, conn); switch (rc) { case 0: netdev->tx_queue_len = conn->path->msglim; fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, CONN_EVENT_TIMER, conn); return; case 11: dev_warn(privptr->dev, "The IUCV device failed to connect to z/VM guest %s\n", netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 12: dev_warn(privptr->dev, "The IUCV device failed to connect to the peer on z/VM" " guest %s\n", netiucv_printname(conn->userid)); 
fsm_newstate(fi, CONN_STATE_STARTWAIT); break; case 13: dev_err(privptr->dev, "Connecting the IUCV device would exceed the maximum" " number of IUCV connections\n"); fsm_newstate(fi, CONN_STATE_CONNERR); break; case 14: dev_err(privptr->dev, "z/VM guest %s has too many IUCV connections" " to connect with the IUCV device\n", netiucv_printname(conn->userid)); fsm_newstate(fi, CONN_STATE_CONNERR); break; case 15: dev_err(privptr->dev, "The IUCV device cannot connect to a z/VM guest with no" " IUCV authorization\n"); fsm_newstate(fi, CONN_STATE_CONNERR); break; default: dev_err(privptr->dev, "Connecting the IUCV device failed with error %d\n", rc); fsm_newstate(fi, CONN_STATE_CONNERR); break; } IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); kfree(conn->path); conn->path = NULL; } static void netiucv_purge_skb_queue(struct sk_buff_head *q) { struct sk_buff *skb; while ((skb = skb_dequeue(q))) { atomic_dec(&skb->users); dev_kfree_skb_any(skb); } } static void conn_action_stop(fsm_instance *fi, int event, void *arg) { struct iucv_event *ev = arg; struct iucv_connection *conn = ev->conn; struct net_device *netdev = conn->netdev; struct netiucv_priv *privptr = netdev_priv(netdev); IUCV_DBF_TEXT(trace, 3, __func__); fsm_deltimer(&conn->timer); fsm_newstate(fi, CONN_STATE_STOPPED); netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); iucv_path_sever(conn->path, iucvMagic); kfree(conn->path); conn->path = NULL; } netiucv_purge_skb_queue(&conn->commit_queue); fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); } static void conn_action_inval(fsm_instance *fi, int event, void *arg) { struct iucv_connection *conn = arg; struct net_device *netdev = conn->netdev; IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n", netdev->name, conn->userid); } static const fsm_node conn_fsm[] = { { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval }, { CONN_STATE_STOPPED, CONN_EVENT_START, 
conn_action_start }, { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop }, { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop }, { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop }, { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop }, { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop }, { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop }, { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop }, { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject }, { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject }, { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject }, { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack }, { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev }, { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever }, { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever }, { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever }, { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx }, { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx }, { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone }, { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone }, }; static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); /* * Actions for interface - statemachine. */ /** * dev_action_start * @fi: An instance of an interface statemachine. * @event: The event, just happened. * @arg: Generic pointer, casted from struct net_device * upon call. * * Startup connection by sending CONN_EVENT_START to it. 
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	/* Enter STARTWAIT first, then kick the connection statemachine;
	 * CONN_EVENT_START may complete (and deliver DEV_EVENT_CONUP)
	 * asynchronously. */
	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}

/**
 * Shutdown connection by sending CONN_EVENT_STOP to it.
 *
 * @param fi An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	struct iucv_event ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	/* The stop action expects a struct iucv_event wrapper, unlike
	 * the start action which passes the connection directly. */
	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}

/**
 * Called from connection statemachine
 * when a connection is up and running.
 *
 * @param fi An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_connup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT:
		/* Normal path: the connection came up while we were
		 * waiting for it. */
		fsm_newstate(fi, DEV_STATE_RUNNING);
		dev_info(privptr->dev, "The IUCV device has been connected"
			" successfully to %s\n", privptr->conn->userid);
		IUCV_DBF_TEXT(setup, 3,
			"connection is up and running\n");
		break;
	case DEV_STATE_STOPWAIT:
		/* Race: connection came up while the device is being
		 * stopped; only trace it, the stop will tear it down. */
		IUCV_DBF_TEXT(data, 2,
			"dev_action_connup: in DEV_STATE_STOPWAIT\n");
		break;
	}
}

/**
 * Called from connection statemachine
 * when a connection has been shutdown.
 *
 * @param fi An instance of an interface statemachine.
 * @param event The event, just happened.
 * @param arg Generic pointer, casted from struct net_device * upon call.
*/ static void dev_action_conndown(fsm_instance *fi, int event, void *arg) { IUCV_DBF_TEXT(trace, 3, __func__); switch (fsm_getstate(fi)) { case DEV_STATE_RUNNING: fsm_newstate(fi, DEV_STATE_STARTWAIT); break; case DEV_STATE_STOPWAIT: fsm_newstate(fi, DEV_STATE_STOPPED); IUCV_DBF_TEXT(setup, 3, "connection is down\n"); break; } } static const fsm_node dev_fsm[] = { { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start }, { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown }, { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop }, { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup }, { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown }, { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop }, }; static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); /** * Transmit a packet. * This is a helper function for netiucv_tx(). * * @param conn Connection to be used for sending. * @param skb Pointer to struct sk_buff of packet to send. * The linklevel header has already been set up * by netiucv_tx(). * * @return 0 on success, -ERRNO on failure. (Never fails.) */ static int netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { struct iucv_message msg; unsigned long saveflags; struct ll_header header; int rc; if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { int l = skb->len + NETIUCV_HDRLEN; spin_lock_irqsave(&conn->collect_lock, saveflags); if (conn->collect_len + l > (conn->max_buffsize - NETIUCV_HDRLEN)) { rc = -EBUSY; IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_transmit_skb\n"); } else { atomic_inc(&skb->users); skb_queue_tail(&conn->collect_queue, skb); conn->collect_len += l; rc = 0; } spin_unlock_irqrestore(&conn->collect_lock, saveflags); } else { struct sk_buff *nskb = skb; /** * Copy the skb to a new allocated skb in lowmem only if the * data is located above 2G in memory or tailroom is < 2. 
*/ unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + NETIUCV_HDRLEN)) >> 31; int copied = 0; if (hi || (skb_tailroom(skb) < 2)) { nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); if (!nskb) { IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); rc = -ENOMEM; return rc; } else { skb_reserve(nskb, NETIUCV_HDRLEN); memcpy(skb_put(nskb, skb->len), skb->data, skb->len); } copied = 1; } /** * skb now is below 2G and has enough room. Add headers. */ header.next = nskb->len + NETIUCV_HDRLEN; memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); header.next = 0; memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); fsm_newstate(conn->fsm, CONN_STATE_TX); conn->prof.send_stamp = current_kernel_time(); msg.tag = 1; msg.class = 0; rc = iucv_message_send(conn->path, &msg, 0, 0, nskb->data, nskb->len); conn->prof.doios_single++; conn->prof.txlen += skb->len; conn->prof.tx_pending++; if (conn->prof.tx_pending > conn->prof.tx_max_pending) conn->prof.tx_max_pending = conn->prof.tx_pending; if (rc) { struct netiucv_priv *privptr; fsm_newstate(conn->fsm, CONN_STATE_IDLE); conn->prof.tx_pending--; privptr = netdev_priv(conn->netdev); if (privptr) privptr->stats.tx_errors++; if (copied) dev_kfree_skb(nskb); else { /** * Remove our headers. They get added * again on retransmit. */ skb_pull(skb, NETIUCV_HDRLEN); skb_trim(skb, skb->len - NETIUCV_HDRLEN); } IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); } else { if (copied) dev_kfree_skb(skb); atomic_inc(&nskb->users); skb_queue_tail(&conn->commit_queue, nskb); } } return rc; } /* * Interface API for upper network layers */ /** * Open an interface. * Called from generic network layer when ifconfig up is run. * * @param dev Pointer to interface struct. * * @return 0 on success, -ERRNO on failure. (Never fails.) 
*/ static int netiucv_open(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); fsm_event(priv->fsm, DEV_EVENT_START, dev); return 0; } /** * Close an interface. * Called from generic network layer when ifconfig down is run. * * @param dev Pointer to interface struct. * * @return 0 on success, -ERRNO on failure. (Never fails.) */ static int netiucv_close(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); fsm_event(priv->fsm, DEV_EVENT_STOP, dev); return 0; } static int netiucv_pm_prepare(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __func__); return 0; } static void netiucv_pm_complete(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __func__); return; } /** * netiucv_pm_freeze() - Freeze PM callback * @dev: netiucv device * * close open netiucv interfaces */ static int netiucv_pm_freeze(struct device *dev) { struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = NULL; int rc = 0; IUCV_DBF_TEXT(trace, 3, __func__); if (priv && priv->conn) ndev = priv->conn->netdev; if (!ndev) goto out; netif_device_detach(ndev); priv->pm_state = fsm_getstate(priv->fsm); rc = netiucv_close(ndev); out: return rc; } /** * netiucv_pm_restore_thaw() - Thaw and restore PM callback * @dev: netiucv device * * re-open netiucv interfaces closed during freeze */ static int netiucv_pm_restore_thaw(struct device *dev) { struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = NULL; int rc = 0; IUCV_DBF_TEXT(trace, 3, __func__); if (priv && priv->conn) ndev = priv->conn->netdev; if (!ndev) goto out; switch (priv->pm_state) { case DEV_STATE_RUNNING: case DEV_STATE_STARTWAIT: rc = netiucv_open(ndev); break; default: break; } netif_device_attach(ndev); out: return rc; } /** * Start transmission of a packet. * Called from generic network device layer. * * @param skb Pointer to buffer containing the packet. * @param dev Pointer to interface struct. * * @return 0 if packet consumed, !0 if packet rejected. 
* Note: If we return !0, then the packet is free'd by * the generic network layer. */ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) { struct netiucv_priv *privptr = netdev_priv(dev); int rc; IUCV_DBF_TEXT(trace, 4, __func__); /** * Some sanity checks ... */ if (skb == NULL) { IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); privptr->stats.tx_dropped++; return NETDEV_TX_OK; } if (skb_headroom(skb) < NETIUCV_HDRLEN) { IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); dev_kfree_skb(skb); privptr->stats.tx_dropped++; return NETDEV_TX_OK; } /** * If connection is not running, try to restart it * and throw away packet. */ if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { dev_kfree_skb(skb); privptr->stats.tx_dropped++; privptr->stats.tx_errors++; privptr->stats.tx_carrier_errors++; return NETDEV_TX_OK; } if (netiucv_test_and_set_busy(dev)) { IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n"); return NETDEV_TX_BUSY; } dev->trans_start = jiffies; rc = netiucv_transmit_skb(privptr->conn, skb); netiucv_clear_busy(dev); return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK; } /** * netiucv_stats * @dev: Pointer to interface struct. * * Returns interface statistics of a device. * * Returns pointer to stats struct of this interface. */ static struct net_device_stats *netiucv_stats (struct net_device * dev) { struct netiucv_priv *priv = netdev_priv(dev); IUCV_DBF_TEXT(trace, 5, __func__); return &priv->stats; } /** * netiucv_change_mtu * @dev: Pointer to interface struct. * @new_mtu: The new MTU to use for this interface. * * Sets MTU of an interface. * * Returns 0 on success, -EINVAL if MTU is out of valid range. * (valid range is 576 .. NETIUCV_MTU_MAX). 
*/ static int netiucv_change_mtu(struct net_device * dev, int new_mtu) { IUCV_DBF_TEXT(trace, 3, __func__); if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) { IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); return -EINVAL; } dev->mtu = new_mtu; return 0; } /* * attributes in sysfs */ static ssize_t user_show(struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); } static ssize_t user_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->conn->netdev; char *p; char *tmp; char username[9]; int i; struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __func__); if (count > 9) { IUCV_DBF_TEXT_(setup, 2, "%d is length of username\n", (int) count); return -EINVAL; } tmp = strsep((char **) &buf, "\n"); for (i = 0, p = tmp; i < 8 && *p; i++, p++) { if (isalnum(*p) || (*p == '$')) { username[i]= toupper(*p); continue; } if (*p == '\n') { /* trailing lf, grr */ break; } IUCV_DBF_TEXT_(setup, 2, "username: invalid character %c\n", *p); return -EINVAL; } while (i < 8) username[i++] = ' '; username[8] = '\0'; if (memcmp(username, priv->conn->userid, 9) && (ndev->flags & (IFF_UP | IFF_RUNNING))) { /* username changed while the interface is active. 
*/ IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); return -EPERM; } read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { read_unlock_bh(&iucv_connection_rwlock); IUCV_DBF_TEXT_(setup, 2, "user_write: Connection " "to %s already exists\n", username); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); memcpy(priv->conn->userid, username, 9); return count; } static DEVICE_ATTR(user, 0644, user_show, user_write); static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%d\n", priv->conn->max_buffsize); } static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->conn->netdev; char *e; int bs1; IUCV_DBF_TEXT(trace, 3, __func__); if (count >= 39) return -EINVAL; bs1 = simple_strtoul(buf, &e, 0); if (e && (!isspace(*e))) { IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); return -EINVAL; } if (bs1 > NETIUCV_BUFSIZE_MAX) { IUCV_DBF_TEXT_(setup, 2, "buffer_write: buffer size %d too large\n", bs1); return -EINVAL; } if ((ndev->flags & IFF_RUNNING) && (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { IUCV_DBF_TEXT_(setup, 2, "buffer_write: buffer size %d too small\n", bs1); return -EINVAL; } if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { IUCV_DBF_TEXT_(setup, 2, "buffer_write: buffer size %d too small\n", bs1); return -EINVAL; } priv->conn->max_buffsize = bs1; if (!(ndev->flags & IFF_RUNNING)) ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN; return count; } static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); 
IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); } static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); static ssize_t conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); } static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); static ssize_t maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); } static ssize_t maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 4, __func__); priv->conn->prof.maxmulti = 0; return count; } static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); } static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 4, __func__); priv->conn->prof.maxcqueue = 0; return count; } static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); } static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); 
IUCV_DBF_TEXT(trace, 4, __func__); priv->conn->prof.doios_single = 0; return count; } static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); } static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); priv->conn->prof.doios_multi = 0; return count; } static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%ld\n", priv->conn->prof.txlen); } static ssize_t txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 4, __func__); priv->conn->prof.txlen = 0; return count; } static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); } static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 4, __func__); priv->conn->prof.tx_time = 0; return count; } static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, 
"%ld\n", priv->conn->prof.tx_pending); } static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 4, __func__); priv->conn->prof.tx_pending = 0; return count; } static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 5, __func__); return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); } static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct netiucv_priv *priv = dev_get_drvdata(dev); IUCV_DBF_TEXT(trace, 4, __func__); priv->conn->prof.tx_max_pending = 0; return count; } static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write); static struct attribute *netiucv_attrs[] = { &dev_attr_buffer.attr, &dev_attr_user.attr, NULL, }; static struct attribute_group netiucv_attr_group = { .attrs = netiucv_attrs, }; static struct attribute *netiucv_stat_attrs[] = { &dev_attr_device_fsm_state.attr, &dev_attr_connection_fsm_state.attr, &dev_attr_max_tx_buffer_used.attr, &dev_attr_max_chained_skbs.attr, &dev_attr_tx_single_write_ops.attr, &dev_attr_tx_multi_write_ops.attr, &dev_attr_netto_bytes.attr, &dev_attr_max_tx_io_time.attr, &dev_attr_tx_pending.attr, &dev_attr_tx_max_pending.attr, NULL, }; static struct attribute_group netiucv_stat_attr_group = { .name = "stats", .attrs = netiucv_stat_attrs, }; static int netiucv_add_files(struct device *dev) { int ret; IUCV_DBF_TEXT(trace, 3, __func__); ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group); if (ret) return ret; ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group); if (ret) sysfs_remove_group(&dev->kobj, &netiucv_attr_group); return ret; } static void netiucv_remove_files(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __func__); 
sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); sysfs_remove_group(&dev->kobj, &netiucv_attr_group); } static int netiucv_register_device(struct net_device *ndev) { struct netiucv_priv *priv = netdev_priv(ndev); struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); int ret; IUCV_DBF_TEXT(trace, 3, __func__); if (dev) { dev_set_name(dev, "net%s", ndev->name); dev->bus = &iucv_bus; dev->parent = iucv_root; /* * The release function could be called after the * module has been unloaded. It's _only_ task is to * free the struct. Therefore, we specify kfree() * directly here. (Probably a little bit obfuscating * but legitime ...). */ dev->release = (void (*)(struct device *))kfree; dev->driver = &netiucv_driver; } else return -ENOMEM; ret = device_register(dev); if (ret) { put_device(dev); return ret; } ret = netiucv_add_files(dev); if (ret) goto out_unreg; priv->dev = dev; dev_set_drvdata(dev, priv); return 0; out_unreg: device_unregister(dev); return ret; } static void netiucv_unregister_device(struct device *dev) { IUCV_DBF_TEXT(trace, 3, __func__); netiucv_remove_files(dev); device_unregister(dev); } /** * Allocate and initialize a new connection structure. 
* Add it to the list of netiucv connections; */ static struct iucv_connection *netiucv_new_connection(struct net_device *dev, char *username) { struct iucv_connection *conn; conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) goto out; skb_queue_head_init(&conn->collect_queue); skb_queue_head_init(&conn->commit_queue); spin_lock_init(&conn->collect_lock); conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; conn->netdev = dev; conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); if (!conn->rx_buff) goto out_conn; conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); if (!conn->tx_buff) goto out_rx; conn->fsm = init_fsm("netiucvconn", conn_state_names, conn_event_names, NR_CONN_STATES, NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, GFP_KERNEL); if (!conn->fsm) goto out_tx; fsm_settimer(conn->fsm, &conn->timer); fsm_newstate(conn->fsm, CONN_STATE_INVALID); if (username) { memcpy(conn->userid, username, 9); fsm_newstate(conn->fsm, CONN_STATE_STOPPED); } write_lock_bh(&iucv_connection_rwlock); list_add_tail(&conn->list, &iucv_connection_list); write_unlock_bh(&iucv_connection_rwlock); return conn; out_tx: kfree_skb(conn->tx_buff); out_rx: kfree_skb(conn->rx_buff); out_conn: kfree(conn); out: return NULL; } /** * Release a connection structure and remove it from the * list of netiucv connections. */ static void netiucv_remove_connection(struct iucv_connection *conn) { IUCV_DBF_TEXT(trace, 3, __func__); write_lock_bh(&iucv_connection_rwlock); list_del_init(&conn->list); write_unlock_bh(&iucv_connection_rwlock); fsm_deltimer(&conn->timer); netiucv_purge_skb_queue(&conn->collect_queue); if (conn->path) { iucv_path_sever(conn->path, iucvMagic); kfree(conn->path); conn->path = NULL; } netiucv_purge_skb_queue(&conn->commit_queue); kfree_fsm(conn->fsm); kfree_skb(conn->rx_buff); kfree_skb(conn->tx_buff); } /** * Release everything of a net device. 
*/ static void netiucv_free_netdevice(struct net_device *dev) { struct netiucv_priv *privptr = netdev_priv(dev); IUCV_DBF_TEXT(trace, 3, __func__); if (!dev) return; if (privptr) { if (privptr->conn) netiucv_remove_connection(privptr->conn); if (privptr->fsm) kfree_fsm(privptr->fsm); privptr->conn = NULL; privptr->fsm = NULL; /* privptr gets freed by free_netdev() */ } free_netdev(dev); } /** * Initialize a net device. (Called from kernel in alloc_netdev()) */ static const struct net_device_ops netiucv_netdev_ops = { .ndo_open = netiucv_open, .ndo_stop = netiucv_close, .ndo_get_stats = netiucv_stats, .ndo_start_xmit = netiucv_tx, .ndo_change_mtu = netiucv_change_mtu, }; static void netiucv_setup_netdevice(struct net_device *dev) { dev->mtu = NETIUCV_MTU_DEFAULT; dev->destructor = netiucv_free_netdevice; dev->hard_header_len = NETIUCV_HDRLEN; dev->addr_len = 0; dev->type = ARPHRD_SLIP; dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->netdev_ops = &netiucv_netdev_ops; } /** * Allocate and initialize everything of a net device. 
*/ static struct net_device *netiucv_init_netdevice(char *username) { struct netiucv_priv *privptr; struct net_device *dev; dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d", netiucv_setup_netdevice); if (!dev) return NULL; privptr = netdev_priv(dev); privptr->fsm = init_fsm("netiucvdev", dev_state_names, dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, dev_fsm, DEV_FSM_LEN, GFP_KERNEL); if (!privptr->fsm) goto out_netdev; privptr->conn = netiucv_new_connection(dev, username); if (!privptr->conn) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); goto out_fsm; } fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); return dev; out_fsm: kfree_fsm(privptr->fsm); out_netdev: free_netdev(dev); return NULL; } static ssize_t conn_write(struct device_driver *drv, const char *buf, size_t count) { const char *p; char username[9]; int i, rc; struct net_device *dev; struct netiucv_priv *priv; struct iucv_connection *cp; IUCV_DBF_TEXT(trace, 3, __func__); if (count>9) { IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); return -EINVAL; } for (i = 0, p = buf; i < 8 && *p; i++, p++) { if (isalnum(*p) || *p == '$') { username[i] = toupper(*p); continue; } if (*p == '\n') /* trailing lf, grr */ break; IUCV_DBF_TEXT_(setup, 2, "conn_write: invalid character %c\n", *p); return -EINVAL; } while (i < 8) username[i++] = ' '; username[8] = '\0'; read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { if (!strncmp(username, cp->userid, 9)) { read_unlock_bh(&iucv_connection_rwlock); IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection " "to %s already exists\n", username); return -EEXIST; } } read_unlock_bh(&iucv_connection_rwlock); dev = netiucv_init_netdevice(username); if (!dev) { IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); return -ENODEV; } rc = netiucv_register_device(dev); if (rc) { IUCV_DBF_TEXT_(setup, 2, "ret %d from netiucv_register_device\n", rc); goto out_free_ndev; } /* sysfs magic */ priv = netdev_priv(dev); 
SET_NETDEV_DEV(dev, priv->dev); rc = register_netdev(dev); if (rc) goto out_unreg; dev_info(priv->dev, "The IUCV interface to %s has been" " established successfully\n", netiucv_printname(username)); return count; out_unreg: netiucv_unregister_device(priv->dev); out_free_ndev: netiucv_free_netdevice(dev); return rc; } static DRIVER_ATTR(connection, 0200, NULL, conn_write); static ssize_t remove_write (struct device_driver *drv, const char *buf, size_t count) { struct iucv_connection *cp; struct net_device *ndev; struct netiucv_priv *priv; struct device *dev; char name[IFNAMSIZ]; const char *p; int i; IUCV_DBF_TEXT(trace, 3, __func__); if (count >= IFNAMSIZ) count = IFNAMSIZ - 1; for (i = 0, p = buf; i < count && *p; i++, p++) { if (*p == '\n' || *p == ' ') /* trailing lf, grr */ break; name[i] = *p; } name[i] = '\0'; read_lock_bh(&iucv_connection_rwlock); list_for_each_entry(cp, &iucv_connection_list, list) { ndev = cp->netdev; priv = netdev_priv(ndev); dev = priv->dev; if (strncmp(name, ndev->name, count)) continue; read_unlock_bh(&iucv_connection_rwlock); if (ndev->flags & (IFF_UP | IFF_RUNNING)) { dev_warn(dev, "The IUCV device is connected" " to %s and cannot be removed\n", priv->conn->userid); IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); return -EPERM; } unregister_netdev(ndev); netiucv_unregister_device(dev); return count; } read_unlock_bh(&iucv_connection_rwlock); IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); return -EINVAL; } static DRIVER_ATTR(remove, 0200, NULL, remove_write); static struct attribute * netiucv_drv_attrs[] = { &driver_attr_connection.attr, &driver_attr_remove.attr, NULL, }; static struct attribute_group netiucv_drv_attr_group = { .attrs = netiucv_drv_attrs, }; static const struct attribute_group *netiucv_drv_attr_groups[] = { &netiucv_drv_attr_group, NULL, }; static void netiucv_banner(void) { pr_info("driver initialized\n"); } static void __exit netiucv_exit(void) { struct iucv_connection *cp; struct net_device 
*ndev; struct netiucv_priv *priv; struct device *dev; IUCV_DBF_TEXT(trace, 3, __func__); while (!list_empty(&iucv_connection_list)) { cp = list_entry(iucv_connection_list.next, struct iucv_connection, list); ndev = cp->netdev; priv = netdev_priv(ndev); dev = priv->dev; unregister_netdev(ndev); netiucv_unregister_device(dev); } device_unregister(netiucv_dev); driver_unregister(&netiucv_driver); iucv_unregister(&netiucv_handler, 1); iucv_unregister_dbf_views(); pr_info("driver unloaded\n"); return; } static int __init netiucv_init(void) { int rc; rc = iucv_register_dbf_views(); if (rc) goto out; rc = iucv_register(&netiucv_handler, 1); if (rc) goto out_dbf; IUCV_DBF_TEXT(trace, 3, __func__); netiucv_driver.groups = netiucv_drv_attr_groups; rc = driver_register(&netiucv_driver); if (rc) { IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); goto out_iucv; } /* establish dummy device */ netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!netiucv_dev) { rc = -ENOMEM; goto out_driver; } dev_set_name(netiucv_dev, "netiucv"); netiucv_dev->bus = &iucv_bus; netiucv_dev->parent = iucv_root; netiucv_dev->release = (void (*)(struct device *))kfree; netiucv_dev->driver = &netiucv_driver; rc = device_register(netiucv_dev); if (rc) { put_device(netiucv_dev); goto out_driver; } netiucv_banner(); return rc; out_driver: driver_unregister(&netiucv_driver); out_iucv: iucv_unregister(&netiucv_handler, 1); out_dbf: iucv_unregister_dbf_views(); out: return rc; } module_init(netiucv_init); module_exit(netiucv_exit); MODULE_LICENSE("GPL");
gpl-2.0
BlissRoms-Kernels/kernel_motorola_BlissPure
drivers/media/dvb-frontends/isl6421.c
3569
4526
/*
 * isl6421.h - driver for lnb supply and control ic ISL6421
 *
 * Copyright (C) 2006 Andrew de Quincey
 * Copyright (C) 2006 Oliver Endriss
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 *
 *
 * the project's page is at http://www.linuxtv.org
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>

#include "dvb_frontend.h"
#include "isl6421.h"

/* Per-attach driver state, hung off fe->sec_priv. */
struct isl6421 {
	u8			config;		/* shadow of the chip's config byte */
	u8			override_or;	/* bits forced to '1' on every write */
	u8			override_and;	/* bits forced to '0' (inverted mask) */
	struct i2c_adapter	*i2c;
	u8			i2c_addr;
};

/*
 * Apply the board-specific override masks to the shadow config and write
 * the single config byte to the chip.
 *
 * Returns 0 on success, -EIO if the I2C transfer did not complete.
 */
static int isl6421_write_config(struct isl6421 *isl6421)
{
	struct i2c_msg msg = {	.addr = isl6421->i2c_addr, .flags = 0,
				.buf = &isl6421->config,
				.len = sizeof(isl6421->config) };

	isl6421->config |= isl6421->override_or;
	isl6421->config &= isl6421->override_and;

	return (i2c_transfer(isl6421->i2c, &msg, 1) == 1) ? 0 : -EIO;
}

/* Select the LNB supply voltage (or switch the supply off entirely). */
static int isl6421_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv;

	isl6421->config &= ~(ISL6421_VSEL1 | ISL6421_EN1);

	switch (voltage) {
	case SEC_VOLTAGE_OFF:
		break;
	case SEC_VOLTAGE_13:
		isl6421->config |= ISL6421_EN1;
		break;
	case SEC_VOLTAGE_18:
		isl6421->config |= (ISL6421_EN1 | ISL6421_VSEL1);
		break;
	default:
		return -EINVAL;
	}

	return isl6421_write_config(isl6421);
}

/* Enable/disable the ~1V boost used to compensate long cable runs. */
static int isl6421_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
{
	struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv;

	if (arg)
		isl6421->config |= ISL6421_LLC1;
	else
		isl6421->config &= ~ISL6421_LLC1;

	return isl6421_write_config(isl6421);
}

/* Gate the 22 kHz continuous tone on the LNB line. */
static int isl6421_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
{
	struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv;

	switch (tone) {
	case SEC_TONE_ON:
		isl6421->config |= ISL6421_ENT1;
		break;
	case SEC_TONE_OFF:
		isl6421->config &= ~ISL6421_ENT1;
		break;
	default:
		return -EINVAL;
	}

	return isl6421_write_config(isl6421);
}

/* release_sec callback: power the LNB down and free the private state. */
static void isl6421_release(struct dvb_frontend *fe)
{
	/* power off */
	isl6421_set_voltage(fe, SEC_VOLTAGE_OFF);

	/* free */
	kfree(fe->sec_priv);
	fe->sec_priv = NULL;
}

/*
 * Attach an ISL6421 LNB supply to a frontend.
 *
 * @fe:			frontend to hook the sec ops into
 * @i2c:		adapter the chip sits on
 * @i2c_addr:		chip address
 * @override_set:	config bits to force to '1' on every write
 * @override_clear:	config bits to force to '0' on every write
 * @override_tone:	if true, also take over the set_tone op
 *
 * Returns @fe on success, NULL if allocation fails or the chip does not
 * answer the probe write.
 */
struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 i2c_addr,
		   u8 override_set, u8 override_clear, bool override_tone)
{
	struct isl6421 *isl6421 = kmalloc(sizeof(struct isl6421), GFP_KERNEL);
	if (!isl6421)
		return NULL;

	/* default configuration */
	isl6421->config = ISL6421_ISEL1;
	isl6421->i2c = i2c;
	isl6421->i2c_addr = i2c_addr;
	fe->sec_priv = isl6421;

	/* bits which should be forced to '1' */
	isl6421->override_or = override_set;

	/* bits which should be forced to '0' */
	isl6421->override_and = ~override_clear;

	/* detect if it is present or not: a probe write of "all off" */
	if (isl6421_set_voltage(fe, SEC_VOLTAGE_OFF)) {
		kfree(isl6421);
		fe->sec_priv = NULL;
		return NULL;
	}

	/* install release callback */
	fe->ops.release_sec = isl6421_release;

	/* override frontend ops */
	fe->ops.set_voltage = isl6421_set_voltage;
	fe->ops.enable_high_lnb_voltage = isl6421_enable_high_lnb_voltage;
	if (override_tone)
		fe->ops.set_tone = isl6421_set_tone;

	return fe;
}
EXPORT_SYMBOL(isl6421_attach);

MODULE_DESCRIPTION("Driver for lnb supply and control ic isl6421");
MODULE_AUTHOR("Andrew de Quincey & Oliver Endriss");
MODULE_LICENSE("GPL");
gpl-2.0
ztemt/Z5_H112_kernel
net/ipv6/exthdrs.c
4337
21538
/* * Extension Header handling for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Andi Kleen <ak@muc.de> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Changes: * yoshfuji : ensure not to overrun while parsing * tlv options. * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs(). * YOSHIFUJI Hideaki @USAGI Register inbound extension header * handlers as inet6_protocol{}. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/icmpv6.h> #include <linux/slab.h> #include <linux/export.h> #include <net/dst.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) #include <net/xfrm.h> #endif #include <asm/uaccess.h> int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) { const unsigned char *nh = skb_network_header(skb); int packet_len = skb->tail - skb->network_header; struct ipv6_opt_hdr *hdr; int len; if (offset + 2 > packet_len) goto bad; hdr = (struct ipv6_opt_hdr *)(nh + offset); len = ((hdr->hdrlen + 1) << 3); if (offset + len > packet_len) goto bad; offset += 2; len -= 2; while (len > 0) { int opttype = nh[offset]; int optlen; if (opttype == type) return offset; switch (opttype) { case IPV6_TLV_PAD0: optlen = 1; break; default: optlen = nh[offset + 1] + 2; if (optlen > len) goto bad; break; } offset += optlen; len -= optlen; } /* not_found */ bad: return -1; } EXPORT_SYMBOL_GPL(ipv6_find_tlv); /* * Parsing 
tlv encoded headers. * * Parsing function "func" returns 1, if parsing succeed * and 0, if it failed. * It MUST NOT touch skb->h. */ struct tlvtype_proc { int type; int (*func)(struct sk_buff *skb, int offset); }; /********************* Generic functions *********************/ /* An unknown option is detected, decide what to do */ static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff) { switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { case 0: /* ignore */ return 1; case 1: /* drop packet */ break; case 3: /* Send ICMP if not a multicast address and drop packet */ /* Actually, it is redundant check. icmp_send will recheck in any case. */ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) break; case 2: /* send ICMP PARM PROB regardless and drop packet */ icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff); return 0; } kfree_skb(skb); return 0; } /* Parse tlv encoded option header (hop-by-hop or destination) */ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb) { struct tlvtype_proc *curr; const unsigned char *nh = skb_network_header(skb); int off = skb_network_header_len(skb); int len = (skb_transport_header(skb)[1] + 1) << 3; if (skb_transport_offset(skb) + len > skb_headlen(skb)) goto bad; off += 2; len -= 2; while (len > 0) { int optlen = nh[off + 1] + 2; switch (nh[off]) { case IPV6_TLV_PAD0: optlen = 1; break; case IPV6_TLV_PADN: break; default: /* Other TLV code so scan list */ if (optlen > len) goto bad; for (curr=procs; curr->type >= 0; curr++) { if (curr->type == nh[off]) { /* type specific length/alignment checks will be performed in the func(). */ if (curr->func(skb, off) == 0) return 0; break; } } if (curr->type < 0) { if (ip6_tlvopt_unknown(skb, off) == 0) return 0; } break; } off += optlen; len -= optlen; } if (len == 0) return 1; bad: kfree_skb(skb); return 0; } /***************************** Destination options header. 
*****************************/ #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) static int ipv6_dest_hao(struct sk_buff *skb, int optoff) { struct ipv6_destopt_hao *hao; struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb); struct in6_addr tmp_addr; int ret; if (opt->dsthao) { LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n"); goto discard; } opt->dsthao = opt->dst1; opt->dst1 = 0; hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); if (hao->length != 16) { LIMIT_NETDEBUG( KERN_DEBUG "hao invalid option length = %d\n", hao->length); goto discard; } if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) { LIMIT_NETDEBUG( KERN_DEBUG "hao is not an unicast addr: %pI6\n", &hao->addr); goto discard; } ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr, (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS); if (unlikely(ret < 0)) goto discard; if (skb_cloned(skb)) { if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto discard; /* update all variable using below by copied skbuff */ hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); ipv6h = ipv6_hdr(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; tmp_addr = ipv6h->saddr; ipv6h->saddr = hao->addr; hao->addr = tmp_addr; if (skb->tstamp.tv64 == 0) __net_timestamp(skb); return 1; discard: kfree_skb(skb); return 0; } #endif static struct tlvtype_proc tlvprocdestopt_lst[] = { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) { .type = IPV6_TLV_HAO, .func = ipv6_dest_hao, }, #endif {-1, NULL} }; static int ipv6_destopt_rcv(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) __u16 dstbuf; #endif struct dst_entry *dst = skb_dst(skb); if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { IP6_INC_STATS_BH(dev_net(dst->dev), 
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } opt->lastopt = opt->dst1 = skb_network_header_len(skb); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) dstbuf = opt->dst1; #endif if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) { skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) opt->nhoff = dstbuf; #else opt->nhoff = opt->dst1; #endif return 1; } IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); return -1; } /******************************** Routing header. ********************************/ /* called with rcu_read_lock() */ static int ipv6_rthdr_rcv(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); struct in6_addr *addr = NULL; struct in6_addr daddr; struct inet6_dev *idev; int n, i; struct ipv6_rt_hdr *hdr; struct rt0_hdr *rthdr; struct net *net = dev_net(skb->dev); int accept_source_route = net->ipv6.devconf_all->accept_source_route; idev = __in6_dev_get(skb->dev); if (idev && accept_source_route > idev->cnf.accept_source_route) accept_source_route = idev->cnf.accept_source_route; if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || skb->pkt_type != PACKET_HOST) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } looped_back: if (hdr->segments_left == 0) { switch (hdr->type) { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPV6_SRCRT_TYPE_2: /* Silently discard type 2 header unless it was * processed by own */ if (!addr) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), 
IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } opt->lastopt = opt->srcrt = skb_network_header_len(skb); skb->transport_header += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; opt->dst1 = 0; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } switch (hdr->type) { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPV6_SRCRT_TYPE_2: if (accept_source_route < 0) goto unknown_rh; /* Silently discard invalid RTH type 2 */ if (hdr->hdrlen != 2 || hdr->segments_left != 1) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } break; #endif default: goto unknown_rh; } /* * This is the routing header forwarding algorithm from * RFC 2460, page 16. */ n = hdr->hdrlen >> 1; if (hdr->segments_left > n) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } /* We are about to mangle packet header. Be careful! Do not damage packets queued somewhere. 
*/ if (skb_cloned(skb)) { /* the copy is a forwarded packet */ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; i = n - --hdr->segments_left; rthdr = (struct rt0_hdr *) hdr; addr = rthdr->addr; addr += i - 1; switch (hdr->type) { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPV6_SRCRT_TYPE_2: if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr, IPPROTO_ROUTING) < 0) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } if (ipv6_addr_is_multicast(addr)) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } daddr = *addr; *addr = ipv6_hdr(skb)->daddr; ipv6_hdr(skb)->daddr = daddr; skb_dst_drop(skb); ip6_route_input(skb); if (skb_dst(skb)->error) { skb_push(skb, skb->data - skb_network_header(skb)); dst_input(skb); return -1; } if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; goto looped_back; } skb_push(skb, skb->data - skb_network_header(skb)); dst_input(skb); return -1; unknown_rh: IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb_network_header(skb)); return -1; } static const struct inet6_protocol rthdr_protocol = { .handler = ipv6_rthdr_rcv, .flags = 
INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR, }; static const struct inet6_protocol destopt_protocol = { .handler = ipv6_destopt_rcv, .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR, }; static const struct inet6_protocol nodata_protocol = { .handler = dst_discard, .flags = INET6_PROTO_NOPOLICY, }; int __init ipv6_exthdrs_init(void) { int ret; ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING); if (ret) goto out; ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS); if (ret) goto out_rthdr; ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE); if (ret) goto out_destopt; out: return ret; out_rthdr: inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); out_destopt: inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); goto out; }; void ipv6_exthdrs_exit(void) { inet6_del_protocol(&nodata_protocol, IPPROTO_NONE); inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); } /********************************** Hop-by-hop options. **********************************/ /* * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input(). */ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) { return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); } static inline struct net *ipv6_skb_net(struct sk_buff *skb) { return skb_dst(skb) ? 
dev_net(skb_dst(skb)->dev) : dev_net(skb->dev); } /* Router Alert as of RFC 2711 */ static int ipv6_hop_ra(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); if (nh[optoff + 1] == 2) { IP6CB(skb)->ra = optoff; return 1; } LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", nh[optoff + 1]); kfree_skb(skb); return 0; } /* Jumbo payload */ static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); struct net *net = ipv6_skb_net(skb); u32 pkt_len; if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", nh[optoff+1]); IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); goto drop; } pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); if (pkt_len <= IPV6_MAXPLEN) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); return 0; } if (ipv6_hdr(skb)->payload_len) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); return 0; } if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto drop; return 1; drop: kfree_skb(skb); return 0; } static struct tlvtype_proc tlvprochopopt_lst[] = { { .type = IPV6_TLV_ROUTERALERT, .func = ipv6_hop_ra, }, { .type = IPV6_TLV_JUMBO, .func = ipv6_hop_jumbo, }, { -1, } }; int ipv6_parse_hopopts(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); /* * skb_network_header(skb) is equal to skb->data, and * skb_network_header_len(skb) is always equal to * sizeof(struct ipv6hdr) by definition of * hop-by-hop options. 
*/ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) || !pskb_may_pull(skb, (sizeof(struct ipv6hdr) + ((skb_transport_header(skb)[1] + 1) << 3)))) { kfree_skb(skb); return -1; } opt->hop = sizeof(struct ipv6hdr); if (ip6_parse_tlv(tlvprochopopt_lst, skb)) { skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3; opt = IP6CB(skb); opt->nhoff = sizeof(struct ipv6hdr); return 1; } return -1; } /* * Creating outbound headers. * * "build" functions work when skb is filled from head to tail (datagram) * "push" functions work when headers are added from tail to head (tcp) * * In both cases we assume, that caller reserved enough room * for headers. */ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p) { struct rt0_hdr *phdr, *ihdr; int hops; ihdr = (struct rt0_hdr *) opt; phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3); memcpy(phdr, ihdr, sizeof(struct rt0_hdr)); hops = ihdr->rt_hdr.hdrlen >> 1; if (hops > 1) memcpy(phdr->addr, ihdr->addr + 1, (hops - 1) * sizeof(struct in6_addr)); phdr->addr[hops - 1] = **addr_p; *addr_p = ihdr->addr; phdr->rt_hdr.nexthdr = *proto; *proto = NEXTHDR_ROUTING; } static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt) { struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt)); memcpy(h, opt, ipv6_optlen(opt)); h->nexthdr = *proto; *proto = type; } void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto, struct in6_addr **daddr) { if (opt->srcrt) { ipv6_push_rthdr(skb, proto, opt->srcrt, daddr); /* * IPV6_RTHDRDSTOPTS is ignored * unless IPV6_RTHDR is set (RFC3542). 
*/ if (opt->dst0opt) ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt); } if (opt->hopopt) ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); } EXPORT_SYMBOL(ipv6_push_nfrag_opts); void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) { if (opt->dst1opt) ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt); } struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) { struct ipv6_txoptions *opt2; opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC); if (opt2) { long dif = (char*)opt2 - (char*)opt; memcpy(opt2, opt, opt->tot_len); if (opt2->hopopt) *((char**)&opt2->hopopt) += dif; if (opt2->dst0opt) *((char**)&opt2->dst0opt) += dif; if (opt2->dst1opt) *((char**)&opt2->dst1opt) += dif; if (opt2->srcrt) *((char**)&opt2->srcrt) += dif; } return opt2; } EXPORT_SYMBOL_GPL(ipv6_dup_options); static int ipv6_renew_option(void *ohdr, struct ipv6_opt_hdr __user *newopt, int newoptlen, int inherit, struct ipv6_opt_hdr **hdr, char **p) { if (inherit) { if (ohdr) { memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr)); *hdr = (struct ipv6_opt_hdr *)*p; *p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr)); } } else { if (newopt) { if (copy_from_user(*p, newopt, newoptlen)) return -EFAULT; *hdr = (struct ipv6_opt_hdr *)*p; if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen) return -EINVAL; *p += CMSG_ALIGN(newoptlen); } } return 0; } struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, struct ipv6_opt_hdr __user *newopt, int newoptlen) { int tot_len = 0; char *p; struct ipv6_txoptions *opt2; int err; if (opt) { if (newtype != IPV6_HOPOPTS && opt->hopopt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt)); if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt)); if (newtype != IPV6_RTHDR && opt->srcrt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt)); if (newtype != IPV6_DSTOPTS && opt->dst1opt) tot_len += 
CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); } if (newopt && newoptlen) tot_len += CMSG_ALIGN(newoptlen); if (!tot_len) return NULL; tot_len += sizeof(*opt2); opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC); if (!opt2) return ERR_PTR(-ENOBUFS); memset(opt2, 0, tot_len); opt2->tot_len = tot_len; p = (char *)(opt2 + 1); err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen, newtype != IPV6_HOPOPTS, &opt2->hopopt, &p); if (err) goto out; err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen, newtype != IPV6_RTHDRDSTOPTS, &opt2->dst0opt, &p); if (err) goto out; err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen, newtype != IPV6_RTHDR, (struct ipv6_opt_hdr **)&opt2->srcrt, &p); if (err) goto out; err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen, newtype != IPV6_DSTOPTS, &opt2->dst1opt, &p); if (err) goto out; opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0); opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0); return opt2; out: sock_kfree_s(sk, opt2, opt2->tot_len); return ERR_PTR(err); } struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt) { /* * ignore the dest before srcrt unless srcrt is being included. * --yoshfuji */ if (opt && opt->dst0opt && !opt->srcrt) { if (opt_space != opt) { memcpy(opt_space, opt, sizeof(*opt_space)); opt = opt_space; } opt->opt_nflen -= ipv6_optlen(opt->dst0opt); opt->dst0opt = NULL; } return opt; } /** * fl6_update_dst - update flowi destination address with info given * by srcrt option, if any. 
* * @fl6: flowi6 for which daddr is to be updated * @opt: struct ipv6_txoptions in which to look for srcrt opt * @orig: copy of original daddr address if modified * * Returns NULL if no txoptions or no srcrt, otherwise returns orig * and initial value of fl6->daddr set in orig */ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, const struct ipv6_txoptions *opt, struct in6_addr *orig) { if (!opt || !opt->srcrt) return NULL; *orig = fl6->daddr; fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; return orig; } EXPORT_SYMBOL_GPL(fl6_update_dst);
gpl-2.0
tositrino/linux
arch/mips/bcm47xx/nvram.c
4593
2807
/*
 * BCM947xx nvram variable access
 *
 * Copyright (C) 2005 Broadcom Corporation
 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/ssb/ssb.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/addrspace.h>
#include <asm/mach-bcm47xx/nvram.h>
#include <asm/mach-bcm47xx/bcm47xx.h>

/* Copy of the NVRAM contents, filled lazily on first lookup. */
static char nvram_buf[NVRAM_SPACE];

/*
 * Probe the parallel flash window for an NVRAM header and copy the
 * variable area into nvram_buf.  The header itself is copied raw; the
 * payload words are stored little-endian on flash and converted.
 */
static void early_nvram_init(void)
{
#ifdef CONFIG_BCM47XX_SSB
	struct ssb_mipscore *mcore_ssb;
#endif
#ifdef CONFIG_BCM47XX_BCMA
	struct bcma_drv_cc *bcma_cc;
#endif
	struct nvram_header *header;
	int i;
	u32 base = 0;
	u32 lim = 0;
	u32 off;
	u32 *src, *dst;

	/* Locate the flash window of whichever bus this board uses. */
	switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
	case BCM47XX_BUS_TYPE_SSB:
		mcore_ssb = &bcm47xx_bus.ssb.mipscore;
		base = mcore_ssb->flash_window;
		lim = mcore_ssb->flash_window_size;
		break;
#endif
#ifdef CONFIG_BCM47XX_BCMA
	case BCM47XX_BUS_TYPE_BCMA:
		bcma_cc = &bcm47xx_bus.bcma.bus.drv_cc;
		base = bcma_cc->pflash.window;
		lim = bcma_cc->pflash.window_size;
		break;
#endif
	}

	/* Windowed flash access: the header sits NVRAM_SPACE below each
	 * power-of-two candidate offset. */
	off = FLASH_MIN;
	while (off <= lim) {
		header = (struct nvram_header *)
			KSEG1ADDR(base + off - NVRAM_SPACE);
		if (header->magic == NVRAM_HEADER)
			goto found;
		off <<= 1;
	}

	/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
	header = (struct nvram_header *) KSEG1ADDR(base + 4096);
	if (header->magic == NVRAM_HEADER)
		goto found;

	header = (struct nvram_header *) KSEG1ADDR(base + 1024);
	if (header->magic == NVRAM_HEADER)
		goto found;

	return;

found:
	src = (u32 *) header;
	dst = (u32 *) nvram_buf;

	/* Header: raw word copy. */
	for (i = 0; i < sizeof(struct nvram_header); i += 4)
		*dst++ = *src++;

	/* Payload: little-endian on flash, bounded by both the header's
	 * length field and our buffer. */
	for (; i < header->len && i < NVRAM_SPACE; i += 4)
		*dst++ = le32_to_cpu(*src++);
}

/*
 * Look up @name in the "name=value\0...\0" variable area and copy its
 * value into @val (at most @val_len bytes, always NUL-terminated).
 *
 * Returns the snprintf result on a hit, NVRAM_ERR_INV_PARAM for a NULL
 * name, or NVRAM_ERR_ENVNOTFOUND when the variable does not exist.
 */
int nvram_getenv(char *name, char *val, size_t val_len)
{
	char *var, *value, *end, *eq;

	if (!name)
		return NVRAM_ERR_INV_PARAM;

	if (!nvram_buf[0])
		early_nvram_init();

	/* Walk the variable area looking for name=value. */
	var = &nvram_buf[sizeof(struct nvram_header)];
	end = nvram_buf + sizeof(nvram_buf) - 2;
	/* Guarantee the scan terminates even on a corrupt image. */
	end[0] = end[1] = '\0';

	for (; *var; var = value + strlen(value) + 1) {
		size_t klen;

		eq = strchr(var, '=');
		if (!eq)
			break;

		value = eq + 1;
		klen = eq - var;
		if (klen == strlen(name) && strncmp(var, name, klen) == 0)
			return snprintf(val, val_len, "%s", value);
	}
	return NVRAM_ERR_ENVNOTFOUND;
}
EXPORT_SYMBOL(nvram_getenv);
gpl-2.0
Abhinav1997/kernel_z3
drivers/acpi/bgrt.c
4849
3920
/* * Copyright 2012 Red Hat, Inc <mjg@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/sysfs.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> static struct acpi_table_bgrt *bgrt_tab; static struct kobject *bgrt_kobj; struct bmp_header { u16 id; u32 size; } __attribute ((packed)); static struct bmp_header bmp_header; static ssize_t show_version(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version); } static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_status(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status); } static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type); } static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); static ssize_t show_xoffset(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x); } static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL); static ssize_t show_yoffset(struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y); } static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL); static ssize_t show_image(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { int size = attr->size; void __iomem *image = attr->private; if (off >= size) { count = 0; } else { if (off + count > size) count = size - off; memcpy_fromio(buf, image+off, count); } return count; } static 
struct bin_attribute image_attr = { .attr = { .name = "image", .mode = S_IRUGO, }, .read = show_image, }; static struct attribute *bgrt_attributes[] = { &dev_attr_version.attr, &dev_attr_status.attr, &dev_attr_type.attr, &dev_attr_xoffset.attr, &dev_attr_yoffset.attr, NULL, }; static struct attribute_group bgrt_attribute_group = { .attrs = bgrt_attributes, }; static int __init bgrt_init(void) { acpi_status status; int ret; void __iomem *bgrt; if (acpi_disabled) return -ENODEV; status = acpi_get_table("BGRT", 0, (struct acpi_table_header **)&bgrt_tab); if (ACPI_FAILURE(status)) return -ENODEV; sysfs_bin_attr_init(&image_attr); bgrt = ioremap(bgrt_tab->image_address, sizeof(struct bmp_header)); if (!bgrt) { ret = -EINVAL; goto out_err; } memcpy_fromio(&bmp_header, bgrt, sizeof(bmp_header)); image_attr.size = bmp_header.size; iounmap(bgrt); image_attr.private = ioremap(bgrt_tab->image_address, image_attr.size); if (!image_attr.private) { ret = -EINVAL; goto out_err; } bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj); if (!bgrt_kobj) { ret = -EINVAL; goto out_iounmap; } ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group); if (ret) goto out_kobject; ret = sysfs_create_bin_file(bgrt_kobj, &image_attr); if (ret) goto out_group; return 0; out_group: sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group); out_kobject: kobject_put(bgrt_kobj); out_iounmap: iounmap(image_attr.private); out_err: return ret; } static void __exit bgrt_exit(void) { iounmap(image_attr.private); sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group); sysfs_remove_bin_file(bgrt_kobj, &image_attr); } module_init(bgrt_init); module_exit(bgrt_exit); MODULE_AUTHOR("Matthew Garrett"); MODULE_DESCRIPTION("BGRT boot graphic support"); MODULE_LICENSE("GPL");
gpl-2.0
PatrikKT/KofilaKernel
arch/sparc/kernel/pci_schizo.c
4849
48989
/* pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support. * * Copyright (C) 2001, 2002, 2003, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/of_device.h> #include <asm/iommu.h> #include <asm/irq.h> #include <asm/pstate.h> #include <asm/prom.h> #include <asm/upa.h> #include "pci_impl.h" #include "iommu_common.h" #define DRIVER_NAME "schizo" #define PFX DRIVER_NAME ": " /* This is a convention that at least Excalibur and Merlin * follow. I suppose the SCHIZO used in Starcat and friends * will do similar. * * The only way I could see this changing is if the newlink * block requires more space in Schizo's address space than * they predicted, thus requiring an address space reorg when * the newer Schizo is taped out. */ /* Streaming buffer control register. */ #define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */ #define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */ #define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */ #define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ #define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */ /* IOMMU control register. 
*/ #define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */ #define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */ #define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */ #define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */ #define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */ #define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ #define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */ #define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */ #define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */ #define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */ #define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */ #define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ /* Schizo config space address format is nearly identical to * that of PSYCHO: * * 32 24 23 16 15 11 10 8 7 2 1 0 * --------------------------------------------------------- * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 | * --------------------------------------------------------- */ #define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space) #define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \ (((unsigned long)(BUS) << 16) | \ ((unsigned long)(DEVFN) << 8) | \ ((unsigned long)(REG))) static void 
*schizo_pci_config_mkaddr(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where) { if (!pbm) return NULL; bus -= pbm->pci_first_busno; return (void *) (SCHIZO_CONFIG_BASE(pbm) | SCHIZO_CONFIG_ENCODE(bus, devfn, where)); } /* SCHIZO error handling support. */ enum schizo_error_type { UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR }; static DEFINE_SPINLOCK(stc_buf_lock); static unsigned long stc_error_buf[128]; static unsigned long stc_tag_buf[16]; static unsigned long stc_line_buf[16]; #define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */ #define SCHIZO_CE_INO 0x31 /* Correctable ECC error */ #define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */ #define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */ #define SCHIZO_SERR_INO 0x34 /* Safari interface error */ #define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */ #define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */ #define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */ #define SCHIZO_STCERR_WRITE 0x2UL #define SCHIZO_STCERR_READ 0x1UL #define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL #define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL #define SCHIZO_STCTAG_VALID 0x8000000000000000UL #define SCHIZO_STCTAG_READ 0x4000000000000000UL #define SCHIZO_STCLINE_LINDX 0x0000000007800000UL #define SCHIZO_STCLINE_SPTR 0x000000000007e000UL #define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL #define SCHIZO_STCLINE_EPTR 0x000000000000003fUL #define SCHIZO_STCLINE_VALID 0x0000000000600000UL #define SCHIZO_STCLINE_FOFN 0x0000000000180000UL static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm, enum schizo_error_type type) { struct strbuf *strbuf = &pbm->stc; unsigned long regbase = pbm->pbm_regs; unsigned long err_base, tag_base, line_base; u64 control; int i; err_base = regbase + SCHIZO_STC_ERR; tag_base = regbase + SCHIZO_STC_TAG; line_base = regbase + SCHIZO_STC_LINE; spin_lock(&stc_buf_lock); /* This is __REALLY__ dangerous. 
When we put the * streaming buffer into diagnostic mode to probe * it's tags and error status, we _must_ clear all * of the line tag valid bits before re-enabling * the streaming buffer. If any dirty data lives * in the STC when we do this, we will end up * invalidating it before it has a chance to reach * main memory. */ control = upa_readq(strbuf->strbuf_control); upa_writeq((control | SCHIZO_STRBUF_CTRL_DENAB), strbuf->strbuf_control); for (i = 0; i < 128; i++) { unsigned long val; val = upa_readq(err_base + (i * 8UL)); upa_writeq(0UL, err_base + (i * 8UL)); stc_error_buf[i] = val; } for (i = 0; i < 16; i++) { stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL)); stc_line_buf[i] = upa_readq(line_base + (i * 8UL)); upa_writeq(0UL, tag_base + (i * 8UL)); upa_writeq(0UL, line_base + (i * 8UL)); } /* OK, state is logged, exit diagnostic mode. */ upa_writeq(control, strbuf->strbuf_control); for (i = 0; i < 16; i++) { int j, saw_error, first, last; saw_error = 0; first = i * 8; last = first + 8; for (j = first; j < last; j++) { unsigned long errval = stc_error_buf[j]; if (errval != 0) { saw_error++; printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n", pbm->name, j, (errval & SCHIZO_STCERR_WRITE) ? 1 : 0, (errval & SCHIZO_STCERR_READ) ? 1 : 0); } } if (saw_error != 0) { unsigned long tagval = stc_tag_buf[i]; unsigned long lineval = stc_line_buf[i]; printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n", pbm->name, i, ((tagval & SCHIZO_STCTAG_PPN) >> 19UL), (tagval & SCHIZO_STCTAG_VPN), ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0), ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0)); /* XXX Should spit out per-bank error information... -DaveM */ printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)" "V(%d)FOFN(%d)]\n", pbm->name, i, ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL), ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL), ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL), ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL), ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0), ((lineval & SCHIZO_STCLINE_FOFN) ? 
1 : 0)); } } spin_unlock(&stc_buf_lock); } /* IOMMU is per-PBM in Schizo, so interrogate both for anonymous * controller level errors. */ #define SCHIZO_IOMMU_TAG 0xa580UL #define SCHIZO_IOMMU_DATA 0xa600UL #define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL #define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL #define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL #define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL #define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL #define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL #define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL #define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL #define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL #define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, enum schizo_error_type type) { struct iommu *iommu = pbm->iommu; unsigned long iommu_tag[16]; unsigned long iommu_data[16]; unsigned long flags; u64 control; int i; spin_lock_irqsave(&iommu->lock, flags); control = upa_readq(iommu->iommu_control); if (control & SCHIZO_IOMMU_CTRL_XLTEERR) { unsigned long base; char *type_string; /* Clear the error encountered bit. */ control &= ~SCHIZO_IOMMU_CTRL_XLTEERR; upa_writeq(control, iommu->iommu_control); switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) { case 0: type_string = "Protection Error"; break; case 1: type_string = "Invalid Error"; break; case 2: type_string = "TimeOut Error"; break; case 3: default: type_string = "ECC Error"; break; } printk("%s: IOMMU Error, type[%s]\n", pbm->name, type_string); /* Put the IOMMU into diagnostic mode and probe * it's TLB for entries with error status. * * It is very possible for another DVMA to occur * while we do this probe, and corrupt the system * further. But we are so screwed at this point * that we are likely to crash hard anyways, so * get as much diagnostic information to the * console as we can. 
*/ upa_writeq(control | SCHIZO_IOMMU_CTRL_DENAB, iommu->iommu_control); base = pbm->pbm_regs; for (i = 0; i < 16; i++) { iommu_tag[i] = upa_readq(base + SCHIZO_IOMMU_TAG + (i * 8UL)); iommu_data[i] = upa_readq(base + SCHIZO_IOMMU_DATA + (i * 8UL)); /* Now clear out the entry. */ upa_writeq(0, base + SCHIZO_IOMMU_TAG + (i * 8UL)); upa_writeq(0, base + SCHIZO_IOMMU_DATA + (i * 8UL)); } /* Leave diagnostic mode. */ upa_writeq(control, iommu->iommu_control); for (i = 0; i < 16; i++) { unsigned long tag, data; tag = iommu_tag[i]; if (!(tag & SCHIZO_IOMMU_TAG_ERR)) continue; data = iommu_data[i]; switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) { case 0: type_string = "Protection Error"; break; case 1: type_string = "Invalid Error"; break; case 2: type_string = "TimeOut Error"; break; case 3: default: type_string = "ECC Error"; break; } printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) " "sz(%dK) vpg(%08lx)]\n", pbm->name, i, type_string, (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL), ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0), ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0), ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8), (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT); printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n", pbm->name, i, ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0), ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0), (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT); } } if (pbm->stc.strbuf_enabled) __schizo_check_stc_error_pbm(pbm, type); spin_unlock_irqrestore(&iommu->lock, flags); } static void schizo_check_iommu_error(struct pci_pbm_info *pbm, enum schizo_error_type type) { schizo_check_iommu_error_pbm(pbm, type); if (pbm->sibling) schizo_check_iommu_error_pbm(pbm->sibling, type); } /* Uncorrectable ECC error status gathering. 
*/ #define SCHIZO_UE_AFSR 0x10030UL #define SCHIZO_UE_AFAR 0x10038UL #define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */ #define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */ #define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */ #define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */ #define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */ #define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */ #define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */ #define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */ #define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */ #define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */ #define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */ static irqreturn_t schizo_ue_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg = pbm->controller_regs + SCHIZO_UE_AFSR; unsigned long afar_reg = pbm->controller_regs + SCHIZO_UE_AFAR; unsigned long afsr, afar, error_bits; int reported, limit; /* Latch uncorrectable error status. */ afar = upa_readq(afar_reg); /* If either of the error pending bits are set in the * AFSR, the error status is being actively updated by * the hardware and we must re-read to get a clean value. */ limit = 1000; do { afsr = upa_readq(afsr_reg); } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); /* Clear the primary/secondary error status bits. */ error_bits = afsr & (SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR | SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA); if (!error_bits) return IRQ_NONE; upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: Uncorrectable Error, primary error type[%s]\n", pbm->name, (((error_bits & SCHIZO_UEAFSR_PPIO) ? 
"PIO" : ((error_bits & SCHIZO_UEAFSR_PDRD) ? "DMA Read" : ((error_bits & SCHIZO_UEAFSR_PDWR) ? "DMA Write" : "???"))))); printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, (afsr & SCHIZO_UEAFSR_AID) >> 24UL); printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0, (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); printk("%s: UE AFAR [%016lx]\n", pbm->name, afar); printk("%s: UE Secondary errors [", pbm->name); reported = 0; if (afsr & SCHIZO_UEAFSR_SPIO) { reported++; printk("(PIO)"); } if (afsr & SCHIZO_UEAFSR_SDMA) { reported++; printk("(DMA)"); } if (!reported) printk("(none)"); printk("]\n"); /* Interrogate IOMMU for error status. */ schizo_check_iommu_error(pbm, UE_ERR); return IRQ_HANDLED; } #define SCHIZO_CE_AFSR 0x10040UL #define SCHIZO_CE_AFAR 0x10048UL #define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL #define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL #define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL #define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL #define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL #define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL #define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL #define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL #define SCHIZO_CEAFSR_AID 0x000000001f000000UL #define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL #define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL #define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL #define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL #define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL static irqreturn_t schizo_ce_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg = pbm->controller_regs + SCHIZO_CE_AFSR; unsigned long afar_reg = pbm->controller_regs + SCHIZO_CE_AFAR; unsigned long afsr, afar, 
error_bits; int reported, limit; /* Latch error status. */ afar = upa_readq(afar_reg); /* If either of the error pending bits are set in the * AFSR, the error status is being actively updated by * the hardware and we must re-read to get a clean value. */ limit = 1000; do { afsr = upa_readq(afsr_reg); } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR | SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA); if (!error_bits) return IRQ_NONE; upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: Correctable Error, primary error type[%s]\n", pbm->name, (((error_bits & SCHIZO_CEAFSR_PPIO) ? "PIO" : ((error_bits & SCHIZO_CEAFSR_PDRD) ? "DMA Read" : ((error_bits & SCHIZO_CEAFSR_PDWR) ? "DMA Write" : "???"))))); /* XXX Use syndrome and afar to print out module string just like * XXX UDB CE trap handler does... -DaveM */ printk("%s: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL, (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL, (afsr & SCHIZO_UEAFSR_AID) >> 24UL); printk("%s: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n", pbm->name, (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0, (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 
1 : 0, (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL, (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL, (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL); printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); printk("%s: CE Secondary errors [", pbm->name); reported = 0; if (afsr & SCHIZO_CEAFSR_SPIO) { reported++; printk("(PIO)"); } if (afsr & SCHIZO_CEAFSR_SDMA) { reported++; printk("(DMA)"); } if (!reported) printk("(none)"); printk("]\n"); return IRQ_HANDLED; } #define SCHIZO_PCI_AFSR 0x2010UL #define SCHIZO_PCI_AFAR 0x2018UL #define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */ #define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */ #define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */ #define SCHIZO_PCI_CTRL (0x2000UL) #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ #define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */ #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ #define 
SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ #define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */ #define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */ #define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */ #define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */ #define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */ #define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PTO_SHIFT 24UL #define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */ #define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */ #define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */ #define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */ #define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */ #define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */ #define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */ #define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */ #define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */ #define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) { unsigned long csr_reg, csr, csr_error_bits; irqreturn_t ret = IRQ_NONE; u16 stat; csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; csr = upa_readq(csr_reg); csr_error_bits = csr & (SCHIZO_PCICTRL_BUS_UNUS | SCHIZO_PCICTRL_TTO_ERR | SCHIZO_PCICTRL_RTRY_ERR | SCHIZO_PCICTRL_DTO_ERR | SCHIZO_PCICTRL_SBH_ERR | SCHIZO_PCICTRL_SERR); if (csr_error_bits) { /* Clear the errors. */ upa_writeq(csr, csr_reg); /* Log 'em. 
*/ if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS) printk("%s: Bus unusable error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR) printk("%s: PCI TRDY# timeout error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR) printk("%s: PCI excessive retry error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR) printk("%s: PCI discard timeout error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR) printk("%s: PCI streaming byte hole error asserted.\n", pbm->name); if (csr_error_bits & SCHIZO_PCICTRL_SERR) printk("%s: PCI SERR signal asserted.\n", pbm->name); ret = IRQ_HANDLED; } pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); if (stat & (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_SYSTEM_ERROR)) { printk("%s: PCI bus error, PCI_STATUS[%04x]\n", pbm->name, stat); pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); ret = IRQ_HANDLED; } return ret; } static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg, afar_reg, base; unsigned long afsr, afar, error_bits; int reported; base = pbm->pbm_regs; afsr_reg = base + SCHIZO_PCI_AFSR; afar_reg = base + SCHIZO_PCI_AFAR; /* Latch error status. */ afar = upa_readq(afar_reg); afsr = upa_readq(afsr_reg); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS); if (!error_bits) return schizo_pcierr_intr_other(pbm); upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: PCI Error, primary error type[%s]\n", pbm->name, (((error_bits & SCHIZO_PCIAFSR_PMA) ? 
"Master Abort" : ((error_bits & SCHIZO_PCIAFSR_PTA) ? "Target Abort" : ((error_bits & SCHIZO_PCIAFSR_PRTRY) ? "Excessive Retries" : ((error_bits & SCHIZO_PCIAFSR_PPERR) ? "Parity Error" : ((error_bits & SCHIZO_PCIAFSR_PTTO) ? "Timeout" : ((error_bits & SCHIZO_PCIAFSR_PUNUS) ? "Bus Unusable" : "???")))))))); printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n", pbm->name, (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL, (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0, ((afsr & SCHIZO_PCIAFSR_CFG) ? "Config" : ((afsr & SCHIZO_PCIAFSR_MEM) ? "Memory" : ((afsr & SCHIZO_PCIAFSR_IO) ? "I/O" : "???")))); printk("%s: PCI AFAR [%016lx]\n", pbm->name, afar); printk("%s: PCI Secondary errors [", pbm->name); reported = 0; if (afsr & SCHIZO_PCIAFSR_SMA) { reported++; printk("(Master Abort)"); } if (afsr & SCHIZO_PCIAFSR_STA) { reported++; printk("(Target Abort)"); } if (afsr & SCHIZO_PCIAFSR_SRTRY) { reported++; printk("(Excessive Retries)"); } if (afsr & SCHIZO_PCIAFSR_SPERR) { reported++; printk("(Parity Error)"); } if (afsr & SCHIZO_PCIAFSR_STTO) { reported++; printk("(Timeout)"); } if (afsr & SCHIZO_PCIAFSR_SUNUS) { reported++; printk("(Bus Unusable)"); } if (!reported) printk("(none)"); printk("]\n"); /* For the error types shown, scan PBM's PCI bus for devices * which have logged that error type. */ /* If we see a Target Abort, this could be the result of an * IOMMU translation error of some sort. It is extremely * useful to log this information as usually it indicates * a bug in the IOMMU support code or a PCI device driver. */ if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) { schizo_check_iommu_error(pbm, PCI_ERR); pci_scan_for_target_abort(pbm, pbm->pci_bus); } if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA)) pci_scan_for_master_abort(pbm, pbm->pci_bus); /* For excessive retries, PSYCHO/PBM will abort the device * and there is no way to specifically check for excessive * retries in the config space status registers. 
So what * we hope is that we'll catch it via the master/target * abort events. */ if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR)) pci_scan_for_parity_error(pbm, pbm->pci_bus); return IRQ_HANDLED; } #define SCHIZO_SAFARI_ERRLOG 0x10018UL #define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL #define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */ #define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */ #define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */ #define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */ #define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */ #define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */ #define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */ #define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */ #define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */ #define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */ #define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */ #define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */ #define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */ #define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */ #define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */ #define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */ #define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */ #define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */ #define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */ #define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */ #define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */ #define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */ #define 
BUS_ERROR_UNMAP 0x0000000000000010UL /* Safari/Tomatillo */ #define BUS_ERROR_BUSERR 0x0000000000000004UL /* Safari/Tomatillo */ #define BUS_ERROR_TIMEOUT 0x0000000000000002UL /* Safari/Tomatillo */ #define BUS_ERROR_ILL 0x0000000000000001UL /* Safari */ /* We only expect UNMAP errors here. The rest of the Safari errors * are marked fatal and thus cause a system reset. */ static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; u64 errlog; errlog = upa_readq(pbm->controller_regs + SCHIZO_SAFARI_ERRLOG); upa_writeq(errlog & ~(SAFARI_ERRLOG_ERROUT), pbm->controller_regs + SCHIZO_SAFARI_ERRLOG); if (!(errlog & BUS_ERROR_UNMAP)) { printk("%s: Unexpected Safari/JBUS error interrupt, errlog[%016llx]\n", pbm->name, errlog); return IRQ_HANDLED; } printk("%s: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n", pbm->name); schizo_check_iommu_error(pbm, SAFARI_ERR); return IRQ_HANDLED; } /* Nearly identical to PSYCHO equivalents... */ #define SCHIZO_ECC_CTRL 0x10020UL #define SCHIZO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */ #define SCHIZO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */ #define SCHIZO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */ #define SCHIZO_SAFARI_ERRCTRL 0x10008UL #define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL #define SCHIZO_SAFARI_IRQCTRL 0x10010UL #define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL static int pbm_routes_this_ino(struct pci_pbm_info *pbm, u32 ino) { ino &= IMAP_INO; if (pbm->ino_bitmap & (1UL << ino)) return 1; return 0; } /* How the Tomatillo IRQs are routed around is pure guesswork here. * * All the Tomatillo devices I see in prtconf dumps seem to have only * a single PCI bus unit attached to it. It would seem they are separate * devices because their PortID (ie. JBUS ID) values are all different * and thus the registers are mapped to totally different locations. 
* * However, two Tomatillo's look "similar" in that the only difference * in their PortID is the lowest bit. * * So if we were to ignore this lower bit, it certainly looks like two * PCI bus units of the same Tomatillo. I still have not really * figured this out... */ static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm) { struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node); u64 tmp, err_mask, err_no_mask; int err; /* Tomatillo IRQ property layout is: * 0: PCIERR * 1: UE ERR * 2: CE ERR * 3: SERR * 4: POWER FAIL? */ if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) { err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0, "TOMATILLO_UE", pbm); if (err) printk(KERN_WARNING "%s: Could not register UE, " "err=%d\n", pbm->name, err); } if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) { err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0, "TOMATILLO_CE", pbm); if (err) printk(KERN_WARNING "%s: Could not register CE, " "err=%d\n", pbm->name, err); } err = 0; if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "TOMATILLO_PCIERR", pbm); } else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "TOMATILLO_PCIERR", pbm); } if (err) printk(KERN_WARNING "%s: Could not register PCIERR, " "err=%d\n", pbm->name, err); if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) { err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0, "TOMATILLO_SERR", pbm); if (err) printk(KERN_WARNING "%s: Could not register SERR, " "err=%d\n", pbm->name, err); } /* Enable UE and CE interrupts for controller. */ upa_writeq((SCHIZO_ECCCTRL_EE | SCHIZO_ECCCTRL_UE | SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL); /* Enable PCI Error interrupts and clear error * bits. 
*/ err_mask = (SCHIZO_PCICTRL_BUS_UNUS | SCHIZO_PCICTRL_TTO_ERR | SCHIZO_PCICTRL_RTRY_ERR | SCHIZO_PCICTRL_SERR | SCHIZO_PCICTRL_EEN); err_no_mask = SCHIZO_PCICTRL_DTO_ERR; tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); tmp |= err_mask; tmp &= ~err_no_mask; upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | SCHIZO_PCIAFSR_STTO); upa_writeq(err_mask, pbm->pbm_regs + SCHIZO_PCI_AFSR); err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR | BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD | BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA | BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO | BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR | BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B | BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR | BUS_ERROR_APERR | BUS_ERROR_UNMAP | BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT); upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask), pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL); upa_writeq((SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)), pbm->controller_regs + SCHIZO_SAFARI_IRQCTRL); } static void schizo_register_error_handlers(struct pci_pbm_info *pbm) { struct platform_device *op = of_find_device_by_node(pbm->op->dev.of_node); u64 tmp, err_mask, err_no_mask; int err; /* Schizo IRQ property layout is: * 0: PCIERR * 1: UE ERR * 2: CE ERR * 3: SERR * 4: POWER FAIL? 
*/ if (pbm_routes_this_ino(pbm, SCHIZO_UE_INO)) { err = request_irq(op->archdata.irqs[1], schizo_ue_intr, 0, "SCHIZO_UE", pbm); if (err) printk(KERN_WARNING "%s: Could not register UE, " "err=%d\n", pbm->name, err); } if (pbm_routes_this_ino(pbm, SCHIZO_CE_INO)) { err = request_irq(op->archdata.irqs[2], schizo_ce_intr, 0, "SCHIZO_CE", pbm); if (err) printk(KERN_WARNING "%s: Could not register CE, " "err=%d\n", pbm->name, err); } err = 0; if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_A_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "SCHIZO_PCIERR", pbm); } else if (pbm_routes_this_ino(pbm, SCHIZO_PCIERR_B_INO)) { err = request_irq(op->archdata.irqs[0], schizo_pcierr_intr, 0, "SCHIZO_PCIERR", pbm); } if (err) printk(KERN_WARNING "%s: Could not register PCIERR, " "err=%d\n", pbm->name, err); if (pbm_routes_this_ino(pbm, SCHIZO_SERR_INO)) { err = request_irq(op->archdata.irqs[3], schizo_safarierr_intr, 0, "SCHIZO_SERR", pbm); if (err) printk(KERN_WARNING "%s: Could not register SERR, " "err=%d\n", pbm->name, err); } /* Enable UE and CE interrupts for controller. */ upa_writeq((SCHIZO_ECCCTRL_EE | SCHIZO_ECCCTRL_UE | SCHIZO_ECCCTRL_CE), pbm->controller_regs + SCHIZO_ECC_CTRL); err_mask = (SCHIZO_PCICTRL_BUS_UNUS | SCHIZO_PCICTRL_ESLCK | SCHIZO_PCICTRL_TTO_ERR | SCHIZO_PCICTRL_RTRY_ERR | SCHIZO_PCICTRL_SBH_ERR | SCHIZO_PCICTRL_SERR | SCHIZO_PCICTRL_EEN); err_no_mask = (SCHIZO_PCICTRL_DTO_ERR | SCHIZO_PCICTRL_SBH_INT); /* Enable PCI Error interrupts and clear error * bits for each PBM. 
*/ tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); tmp |= err_mask; tmp &= ~err_no_mask; upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); upa_writeq((SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS | SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA | SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR | SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS), pbm->pbm_regs + SCHIZO_PCI_AFSR); /* Make all Safari error conditions fatal except unmapped * errors which we make generate interrupts. */ err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS | BUS_ERROR_BADMA | BUS_ERROR_BADMB | BUS_ERROR_BADMC | BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB | BUS_ERROR_CIQTO | BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO | BUS_ERROR_UFPQTO | BUS_ERROR_APERR | BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT | BUS_ERROR_ILL); #if 1 /* XXX Something wrong with some Excalibur systems * XXX Sun is shipping. The behavior on a 2-cpu * XXX machine is that both CPU1 parity error bits * XXX are set and are immediately set again when * XXX their error status bits are cleared. Just * XXX ignore them for now. -DaveM */ err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB | BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB); #endif upa_writeq((SCHIZO_SAFERRCTRL_EN | err_mask), pbm->controller_regs + SCHIZO_SAFARI_ERRCTRL); } static void pbm_config_busmastering(struct pci_pbm_info *pbm) { u8 *addr; /* Set cache-line size to 64 bytes, this is actually * a nop but I do it for completeness. */ addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, 0, PCI_CACHE_LINE_SIZE); pci_config_write8(addr, 64 / sizeof(u32)); /* Set PBM latency timer to 64 PCI clocks. 
*/ addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno, 0, PCI_LATENCY_TIMER); pci_config_write8(addr, 64); } static void __devinit schizo_scan_bus(struct pci_pbm_info *pbm, struct device *parent) { pbm_config_busmastering(pbm); pbm->is_66mhz_capable = (of_find_property(pbm->op->dev.of_node, "66mhz-capable", NULL) != NULL); pbm->pci_bus = pci_scan_one_pbm(pbm, parent); if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) tomatillo_register_error_handlers(pbm); else schizo_register_error_handlers(pbm); } #define SCHIZO_STRBUF_CONTROL (0x02800UL) #define SCHIZO_STRBUF_FLUSH (0x02808UL) #define SCHIZO_STRBUF_FSYNC (0x02810UL) #define SCHIZO_STRBUF_CTXFLUSH (0x02818UL) #define SCHIZO_STRBUF_CTXMATCH (0x10000UL) static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm) { unsigned long base = pbm->pbm_regs; u64 control; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { /* TOMATILLO lacks streaming cache. */ return; } /* SCHIZO has context flushing. */ pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL; pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH; pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC; pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH; pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH; pbm->stc.strbuf_flushflag = (volatile unsigned long *) ((((unsigned long)&pbm->stc.__flushflag_buf[0]) + 63UL) & ~63UL); pbm->stc.strbuf_flushflag_pa = (unsigned long) __pa(pbm->stc.strbuf_flushflag); /* Turn off LRU locking and diag mode, enable the * streaming buffer and leave the rerun-disable * setting however OBP set it. 
*/ control = upa_readq(pbm->stc.strbuf_control); control &= ~(SCHIZO_STRBUF_CTRL_LPTR | SCHIZO_STRBUF_CTRL_LENAB | SCHIZO_STRBUF_CTRL_DENAB); control |= SCHIZO_STRBUF_CTRL_ENAB; upa_writeq(control, pbm->stc.strbuf_control); pbm->stc.strbuf_enabled = 1; } #define SCHIZO_IOMMU_CONTROL (0x00200UL) #define SCHIZO_IOMMU_TSBBASE (0x00208UL) #define SCHIZO_IOMMU_FLUSH (0x00210UL) #define SCHIZO_IOMMU_CTXFLUSH (0x00218UL) static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm) { static const u32 vdma_default[] = { 0xc0000000, 0x40000000 }; unsigned long i, tagbase, database; struct iommu *iommu = pbm->iommu; int tsbsize, err; const u32 *vdma; u32 dma_mask; u64 control; vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL); if (!vdma) vdma = vdma_default; dma_mask = vdma[0]; switch (vdma[1]) { case 0x20000000: dma_mask |= 0x1fffffff; tsbsize = 64; break; case 0x40000000: dma_mask |= 0x3fffffff; tsbsize = 128; break; case 0x80000000: dma_mask |= 0x7fffffff; tsbsize = 128; break; default: printk(KERN_ERR PFX "Strange virtual-dma size.\n"); return -EINVAL; } /* Register addresses, SCHIZO has iommu ctx flushing. */ iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL; iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE; iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH; iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL); iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH; /* We use the main control/status register of SCHIZO as the write * completion register. */ iommu->write_complete_reg = pbm->controller_regs + 0x10000UL; /* * Invalidate TLB Entries. 
*/ control = upa_readq(iommu->iommu_control); control |= SCHIZO_IOMMU_CTRL_DENAB; upa_writeq(control, iommu->iommu_control); tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA; for (i = 0; i < 16; i++) { upa_writeq(0, pbm->pbm_regs + tagbase + (i * 8UL)); upa_writeq(0, pbm->pbm_regs + database + (i * 8UL)); } /* Leave diag mode enabled for full-flushing done * in pci_iommu.c */ err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask, pbm->numa_node); if (err) { printk(KERN_ERR PFX "iommu_table_init() fails with %d\n", err); return err; } upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); control = upa_readq(iommu->iommu_control); control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ); switch (tsbsize) { case 64: control |= SCHIZO_IOMMU_TSBSZ_64K; break; case 128: control |= SCHIZO_IOMMU_TSBSZ_128K; break; } control |= SCHIZO_IOMMU_CTRL_ENAB; upa_writeq(control, iommu->iommu_control); return 0; } #define SCHIZO_PCI_IRQ_RETRY (0x1a00UL) #define SCHIZO_IRQ_RETRY_INF 0xffUL #define SCHIZO_PCI_DIAG (0x2020UL) #define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */ #define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */ #define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */ #define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */ #define TOMATILLO_PCI_IOC_CSR (0x2248UL) #define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL #define 
TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL #define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL #define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL #define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL #define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL #define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL #define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL #define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL #define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL #define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL #define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL #define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL #define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL #define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL #define TOMATILLO_PCI_IOC_TDIAG (0x2250UL) #define TOMATILLO_PCI_IOC_DDIAG (0x2290UL) static void schizo_pbm_hw_init(struct pci_pbm_info *pbm) { u64 tmp; upa_writeq(5, pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY); tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_CTRL); /* Enable arbiter for all PCI slots. */ tmp |= 0xff; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && pbm->chip_version >= 0x2) tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; if (!of_find_property(pbm->op->dev.of_node, "no-bus-parking", NULL)) tmp |= SCHIZO_PCICTRL_PARK; else tmp &= ~SCHIZO_PCICTRL_PARK; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && pbm->chip_version <= 0x1) tmp |= SCHIZO_PCICTRL_DTO_INT; else tmp &= ~SCHIZO_PCICTRL_DTO_INT; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) tmp |= (SCHIZO_PCICTRL_MRM_PREF | SCHIZO_PCICTRL_RDO_PREF | SCHIZO_PCICTRL_RDL_PREF); upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_CTRL); tmp = upa_readq(pbm->pbm_regs + SCHIZO_PCI_DIAG); tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB | SCHIZO_PCIDIAG_D_RETRY | SCHIZO_PCIDIAG_D_INTSYNC); upa_writeq(tmp, pbm->pbm_regs + SCHIZO_PCI_DIAG); if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { /* Clear prefetch lengths to workaround a bug in * Jalapeno... 
*/ tmp = (TOMATILLO_IOC_PART_WPENAB | (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) | TOMATILLO_IOC_RDMULT_CPENAB | TOMATILLO_IOC_RDONE_CPENAB | TOMATILLO_IOC_RDLINE_CPENAB); upa_writeq(tmp, pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR); } } static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op, u32 portid, int chip_type) { const struct linux_prom64_registers *regs; struct device_node *dp = op->dev.of_node; const char *chipset_name; int err; switch (chip_type) { case PBM_CHIP_TYPE_TOMATILLO: chipset_name = "TOMATILLO"; break; case PBM_CHIP_TYPE_SCHIZO_PLUS: chipset_name = "SCHIZO+"; break; case PBM_CHIP_TYPE_SCHIZO: default: chipset_name = "SCHIZO"; break; } /* For SCHIZO, three OBP regs: * 1) PBM controller regs * 2) Schizo front-end controller regs (same for both PBMs) * 3) PBM PCI config space * * For TOMATILLO, four OBP regs: * 1) PBM controller regs * 2) Tomatillo front-end controller regs * 3) PBM PCI config space * 4) Ichip regs */ regs = of_get_property(dp, "reg", NULL); pbm->next = pci_pbm_root; pci_pbm_root = pbm; pbm->numa_node = -1; pbm->pci_ops = &sun4u_pci_ops; pbm->config_space_reg_bits = 8; pbm->index = pci_num_pbms++; pbm->portid = portid; pbm->op = op; pbm->chip_type = chip_type; pbm->chip_version = of_getintprop_default(dp, "version#", 0); pbm->chip_revision = of_getintprop_default(dp, "module-version#", 0); pbm->pbm_regs = regs[0].phys_addr; pbm->controller_regs = regs[1].phys_addr - 0x10000UL; if (chip_type == PBM_CHIP_TYPE_TOMATILLO) pbm->sync_reg = regs[3].phys_addr + 0x1a18UL; pbm->name = dp->full_name; printk("%s: %s PCI Bus Module ver[%x:%x]\n", pbm->name, chipset_name, pbm->chip_version, pbm->chip_revision); schizo_pbm_hw_init(pbm); pci_determine_mem_io_space(pbm); pci_get_pbm_props(pbm); err = schizo_pbm_iommu_init(pbm); if (err) return err; schizo_pbm_strbuf_init(pbm); schizo_scan_bus(pbm, &op->dev); return 0; } static inline int portid_compare(u32 x, u32 y, int chip_type) { if (chip_type == PBM_CHIP_TYPE_TOMATILLO) { 
if (x == (y ^ 1)) return 1; return 0; } return (x == y); } static struct pci_pbm_info * __devinit schizo_find_sibling(u32 portid, int chip_type) { struct pci_pbm_info *pbm; for (pbm = pci_pbm_root; pbm; pbm = pbm->next) { if (portid_compare(pbm->portid, portid, chip_type)) return pbm; } return NULL; } static int __devinit __schizo_init(struct platform_device *op, unsigned long chip_type) { struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; struct iommu *iommu; u32 portid; int err; portid = of_getintprop_default(dp, "portid", 0xff); err = -ENOMEM; pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); if (!pbm) { printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n"); goto out_err; } pbm->sibling = schizo_find_sibling(portid, chip_type); iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); if (!iommu) { printk(KERN_ERR PFX "Cannot allocate PBM A iommu.\n"); goto out_free_pbm; } pbm->iommu = iommu; if (schizo_pbm_init(pbm, op, portid, chip_type)) goto out_free_iommu; if (pbm->sibling) pbm->sibling->sibling = pbm; dev_set_drvdata(&op->dev, pbm); return 0; out_free_iommu: kfree(pbm->iommu); out_free_pbm: kfree(pbm); out_err: return err; } static const struct of_device_id schizo_match[]; static int __devinit schizo_probe(struct platform_device *op) { const struct of_device_id *match; match = of_match_device(schizo_match, &op->dev); if (!match) return -EINVAL; return __schizo_init(op, (unsigned long)match->data); } /* The ordering of this table is very important. Some Tomatillo * nodes announce that they are compatible with both pci108e,a801 * and pci108e,8001. So list the chips in reverse chronological * order. 
 */
/* Newest chip first: a Tomatillo node that also claims pci108e,8001
 * compatibility must bind as PBM_CHIP_TYPE_TOMATILLO.
 */
static const struct of_device_id schizo_match[] = {
	{
		.name = "pci",
		.compatible = "pci108e,a801",
		.data = (void *) PBM_CHIP_TYPE_TOMATILLO,
	},
	{
		.name = "pci",
		.compatible = "pci108e,8002",
		.data = (void *) PBM_CHIP_TYPE_SCHIZO_PLUS,
	},
	{
		.name = "pci",
		.compatible = "pci108e,8001",
		.data = (void *) PBM_CHIP_TYPE_SCHIZO,
	},
	{},	/* sentinel */
};

static struct platform_driver schizo_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = schizo_match,
	},
	.probe = schizo_probe,
};

/* Registered at subsys_initcall time so the PCI controllers exist
 * before ordinary device drivers probe.
 */
static int __init schizo_init(void)
{
	return platform_driver_register(&schizo_driver);
}

subsys_initcall(schizo_init);
gpl-2.0
TeamHackLG/lge-kernel-lproj
sound/pci/hda/hda_beep.c
5105
6896
/* * Digital Beep Input Interface for HD-audio codec * * Author: Matthew Ranostay <mranostay@embeddedalley.com> * Copyright (c) 2008 Embedded Alley Solutions Inc * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/input.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/export.h> #include <sound/core.h> #include "hda_beep.h" #include "hda_local.h" enum { DIGBEEP_HZ_STEP = 46875, /* 46.875 Hz */ DIGBEEP_HZ_MIN = 93750, /* 93.750 Hz */ DIGBEEP_HZ_MAX = 12000000, /* 12 KHz */ }; static void snd_hda_generate_beep(struct work_struct *work) { struct hda_beep *beep = container_of(work, struct hda_beep, beep_work); struct hda_codec *codec = beep->codec; if (!beep->enabled) return; /* generate tone */ snd_hda_codec_write(codec, beep->nid, 0, AC_VERB_SET_BEEP_CONTROL, beep->tone); } /* (non-standard) Linear beep tone calculation for IDT/STAC codecs * * The tone frequency of beep generator on IDT/STAC codecs is * defined from the 8bit tone parameter, in Hz, * freq = 48000 * (257 - tone) / 1024 * that is from 12kHz to 93.75Hz in steps of 46.875 Hz */ static int beep_linear_tone(struct hda_beep *beep, int hz) { if (hz <= 0) return 0; hz *= 1000; /* fixed point */ hz = hz - DIGBEEP_HZ_MIN + DIGBEEP_HZ_STEP / 2; /* round to nearest step */ if (hz < 0) hz = 0; /* turn off PC beep*/ else 
if (hz >= (DIGBEEP_HZ_MAX - DIGBEEP_HZ_MIN))
		hz = 1; /* max frequency */
	else {
		hz /= DIGBEEP_HZ_STEP;
		hz = 255 - hz;
	}
	return hz;
}

/* HD-audio standard beep tone parameter calculation
 *
 * The tone frequency in Hz is calculated as
 *   freq = 48000 / (tone * 4)
 * from 47Hz to 12kHz
 */
static int beep_standard_tone(struct hda_beep *beep, int hz)
{
	if (hz <= 0)
		return 0; /* disabled */
	hz = 12000 / hz;
	if (hz > 0xff)
		return 0xff;
	if (hz <= 0)
		return 1;
	return hz;
}

/* Input-device event callback for EV_SND events.  Converts the
 * requested frequency to the codec's tone parameter and defers the
 * actual verb write to the workqueue (this may be called in atomic
 * context by the input core).  Returns 0 on success, -1 for codes
 * other than SND_BELL/SND_TONE.
 */
static int snd_hda_beep_event(struct input_dev *dev, unsigned int type,
				unsigned int code, int hz)
{
	struct hda_beep *beep = input_get_drvdata(dev);

	switch (code) {
	case SND_BELL:
		if (hz)
			hz = 1000;
		/* fall through: a BELL is just a 1 kHz TONE */
	case SND_TONE:
		if (beep->linear_tone)
			beep->tone = beep_linear_tone(beep, hz);
		else
			beep->tone = beep_standard_tone(beep, hz);
		break;
	default:
		return -1;
	}

	/* schedule beep event */
	schedule_work(&beep->beep_work);
	return 0;
}

/* Tear down the input device and silence the beep generator.  The
 * pending work is cancelled only after unregistering so no new events
 * can requeue it.
 */
static void snd_hda_do_detach(struct hda_beep *beep)
{
	input_unregister_device(beep->dev);
	beep->dev = NULL;
	cancel_work_sync(&beep->beep_work);
	/* turn off beep for sure */
	snd_hda_codec_write(beep->codec, beep->nid, 0,
				  AC_VERB_SET_BEEP_CONTROL, 0);
}

/* Allocate and register the "HDA Digital PCBeep" input device that
 * feeds snd_hda_beep_event().  Returns 0 on success or a negative
 * errno; the input device is freed on registration failure.
 */
static int snd_hda_do_attach(struct hda_beep *beep)
{
	struct input_dev *input_dev;
	struct hda_codec *codec = beep->codec;
	int err;

	input_dev = input_allocate_device();
	if (!input_dev) {
		printk(KERN_INFO "hda_beep: unable to allocate input device\n");
		return -ENOMEM;
	}

	/* setup digital beep device */
	input_dev->name = "HDA Digital PCBeep";
	input_dev->phys = beep->phys;
	input_dev->id.bustype = BUS_PCI;

	/* HDA codec vendor id encodes PCI vendor/product */
	input_dev->id.vendor = codec->vendor_id >> 16;
	input_dev->id.product = codec->vendor_id & 0xffff;
	input_dev->id.version = 0x01;

	input_dev->evbit[0] = BIT_MASK(EV_SND);
	input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
	input_dev->event = snd_hda_beep_event;
	input_dev->dev.parent = &codec->bus->pci->dev;
	input_set_drvdata(input_dev, beep);

	err = input_register_device(input_dev);
	if (err < 0) {
		input_free_device(input_dev);
printk(KERN_INFO "hda_beep: unable to register input device\n"); return err; } beep->dev = input_dev; return 0; } static void snd_hda_do_register(struct work_struct *work) { struct hda_beep *beep = container_of(work, struct hda_beep, register_work); mutex_lock(&beep->mutex); if (beep->enabled && !beep->dev) snd_hda_do_attach(beep); mutex_unlock(&beep->mutex); } static void snd_hda_do_unregister(struct work_struct *work) { struct hda_beep *beep = container_of(work, struct hda_beep, unregister_work.work); mutex_lock(&beep->mutex); if (!beep->enabled && beep->dev) snd_hda_do_detach(beep); mutex_unlock(&beep->mutex); } int snd_hda_enable_beep_device(struct hda_codec *codec, int enable) { struct hda_beep *beep = codec->beep; enable = !!enable; if (beep == NULL) return 0; if (beep->enabled != enable) { beep->enabled = enable; if (!enable) { /* turn off beep */ snd_hda_codec_write(beep->codec, beep->nid, 0, AC_VERB_SET_BEEP_CONTROL, 0); } if (beep->mode == HDA_BEEP_MODE_SWREG) { if (enable) { cancel_delayed_work(&beep->unregister_work); schedule_work(&beep->register_work); } else { schedule_delayed_work(&beep->unregister_work, HZ); } } return 1; } return 0; } EXPORT_SYMBOL_HDA(snd_hda_enable_beep_device); int snd_hda_attach_beep_device(struct hda_codec *codec, int nid) { struct hda_beep *beep; if (!snd_hda_get_bool_hint(codec, "beep")) return 0; /* disabled explicitly by hints */ if (codec->beep_mode == HDA_BEEP_MODE_OFF) return 0; /* disabled by module option */ beep = kzalloc(sizeof(*beep), GFP_KERNEL); if (beep == NULL) return -ENOMEM; snprintf(beep->phys, sizeof(beep->phys), "card%d/codec#%d/beep0", codec->bus->card->number, codec->addr); /* enable linear scale */ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_DIGI_CONVERT_2, 0x01); beep->nid = nid; beep->codec = codec; beep->mode = codec->beep_mode; codec->beep = beep; INIT_WORK(&beep->register_work, &snd_hda_do_register); INIT_DELAYED_WORK(&beep->unregister_work, &snd_hda_do_unregister); INIT_WORK(&beep->beep_work, 
&snd_hda_generate_beep); mutex_init(&beep->mutex); if (beep->mode == HDA_BEEP_MODE_ON) { int err = snd_hda_do_attach(beep); if (err < 0) { kfree(beep); codec->beep = NULL; return err; } } return 0; } EXPORT_SYMBOL_HDA(snd_hda_attach_beep_device); void snd_hda_detach_beep_device(struct hda_codec *codec) { struct hda_beep *beep = codec->beep; if (beep) { cancel_work_sync(&beep->register_work); cancel_delayed_work(&beep->unregister_work); if (beep->dev) snd_hda_do_detach(beep); codec->beep = NULL; kfree(beep); } } EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device);
gpl-2.0
faux123/Galaxy_S5
net/ipv4/inet_hashtables.c
6129
15354
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Generic INET transport hashtables * * Authors: Lotsa people, from code originally in tcp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/wait.h> #include <net/inet_connection_sock.h> #include <net/inet_hashtables.h> #include <net/secure_seq.h> #include <net/ip.h> /* * Allocate and initialize a new local port bind bucket. * The bindhash mutex for snum's hash chain must be held here. */ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, const unsigned short snum) { struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); if (tb != NULL) { write_pnet(&tb->ib_net, hold_net(net)); tb->port = snum; tb->fastreuse = 0; tb->num_owners = 0; INIT_HLIST_HEAD(&tb->owners); hlist_add_head(&tb->node, &head->chain); } return tb; } /* * Caller must hold hashbucket lock for this tb with local BH disabled */ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) { if (hlist_empty(&tb->owners)) { __hlist_del(&tb->node); release_net(ib_net(tb)); kmem_cache_free(cachep, tb); } } void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, const unsigned short snum) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; atomic_inc(&hashinfo->bsockets); inet_sk(sk)->inet_num = snum; sk_add_bind_node(sk, &tb->owners); tb->num_owners++; inet_csk(sk)->icsk_bind_hash = tb; } /* * Get rid of any references to a local port held by the given sock. 
*/ static void __inet_put_port(struct sock *sk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num, hashinfo->bhash_size); struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; struct inet_bind_bucket *tb; atomic_dec(&hashinfo->bsockets); spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; __sk_del_bind_node(sk); tb->num_owners--; inet_csk(sk)->icsk_bind_hash = NULL; inet_sk(sk)->inet_num = 0; inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); spin_unlock(&head->lock); } void inet_put_port(struct sock *sk) { local_bh_disable(); __inet_put_port(sk); local_bh_enable(); } EXPORT_SYMBOL(inet_put_port); int __inet_inherit_port(struct sock *sk, struct sock *child) { struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; unsigned short port = inet_sk(child)->inet_num; const int bhash = inet_bhashfn(sock_net(sk), port, table->bhash_size); struct inet_bind_hashbucket *head = &table->bhash[bhash]; struct inet_bind_bucket *tb; spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; if (tb->port != port) { /* NOTE: using tproxy and redirecting skbs to a proxy * on a different listener port breaks the assumption * that the listener socket's icsk_bind_hash is the same * as that of the child socket. We have to look up or * create a new bind bucket for the child here. 
*/ struct hlist_node *node; inet_bind_bucket_for_each(tb, node, &head->chain) { if (net_eq(ib_net(tb), sock_net(sk)) && tb->port == port) break; } if (!node) { tb = inet_bind_bucket_create(table->bind_bucket_cachep, sock_net(sk), head, port); if (!tb) { spin_unlock(&head->lock); return -ENOMEM; } } } inet_bind_hash(child, tb, port); spin_unlock(&head->lock); return 0; } EXPORT_SYMBOL_GPL(__inet_inherit_port); static inline int compute_score(struct sock *sk, struct net *net, const unsigned short hnum, const __be32 daddr, const int dif) { int score = -1; struct inet_sock *inet = inet_sk(sk); if (net_eq(sock_net(sk), net) && inet->inet_num == hnum && !ipv6_only_sock(sk)) { __be32 rcv_saddr = inet->inet_rcv_saddr; score = sk->sk_family == PF_INET ? 1 : 0; if (rcv_saddr) { if (rcv_saddr != daddr) return -1; score += 2; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score += 2; } } return score; } /* * Don't inline this cruft. Here are some nice properties to exploit here. The * BSD API does not allow a listening sock to specify the remote port nor the * remote address for the connection. So always assume those are both * wildcarded during the search since they can never be otherwise. */ struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, const __be32 daddr, const unsigned short hnum, const int dif) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned int hash = inet_lhashfn(net, hnum); struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; int score, hiscore; rcu_read_lock(); begin: result = NULL; hiscore = -1; sk_nulls_for_each_rcu(sk, node, &ilb->head) { score = compute_score(sk, net, hnum, daddr, dif); if (score > hiscore) { result = sk; hiscore = score; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. 
*/ if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) result = NULL; else if (unlikely(compute_score(result, net, hnum, daddr, dif) < hiscore)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__inet_lookup_listener); struct sock * __inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 hnum, const int dif) { INET_ADDR_COOKIE(acookie, saddr, daddr) const __portpair ports = INET_COMBINED_PORTS(sport, hnum); struct sock *sk; const struct hlist_nulls_node *node; /* Optimize here for direct hit, only listening connections can * have wildcards anyways. */ unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); unsigned int slot = hash & hashinfo->ehash_mask; struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; rcu_read_lock(); begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (INET_MATCH(sk, net, hash, acookie, saddr, daddr, ports, dif)) { if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) goto begintw; if (unlikely(!INET_MATCH(sk, net, hash, acookie, saddr, daddr, ports, dif))) { sock_put(sk); goto begin; } goto out; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; begintw: /* Must check for a TIME_WAIT'er before going to listener hash. */ sk_nulls_for_each_rcu(sk, node, &head->twchain) { if (INET_TW_MATCH(sk, net, hash, acookie, saddr, daddr, ports, dif)) { if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) { sk = NULL; goto out; } if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie, saddr, daddr, ports, dif))) { sock_put(sk); goto begintw; } goto out; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. 
* We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begintw; sk = NULL; out: rcu_read_unlock(); return sk; } EXPORT_SYMBOL_GPL(__inet_lookup_established); /* called with local bh disabled */ static int __inet_check_established(struct inet_timewait_death_row *death_row, struct sock *sk, __u16 lport, struct inet_timewait_sock **twp) { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_sock *inet = inet_sk(sk); __be32 daddr = inet->inet_rcv_saddr; __be32 saddr = inet->inet_daddr; int dif = sk->sk_bound_dev_if; INET_ADDR_COOKIE(acookie, saddr, daddr) const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); struct net *net = sock_net(sk); unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->inet_dport); struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); spinlock_t *lock = inet_ehash_lockp(hinfo, hash); struct sock *sk2; const struct hlist_nulls_node *node; struct inet_timewait_sock *tw; int twrefcnt = 0; spin_lock(lock); /* Check TIME-WAIT sockets first. */ sk_nulls_for_each(sk2, node, &head->twchain) { tw = inet_twsk(sk2); if (INET_TW_MATCH(sk2, net, hash, acookie, saddr, daddr, ports, dif)) { if (twsk_unique(sk, sk2, twp)) goto unique; else goto not_unique; } } tw = NULL; /* And established part... */ sk_nulls_for_each(sk2, node, &head->chain) { if (INET_MATCH(sk2, net, hash, acookie, saddr, daddr, ports, dif)) goto not_unique; } unique: /* Must record num and sport now. Otherwise we will see * in hash table socket with a funny identity. */ inet->inet_num = lport; inet->inet_sport = htons(lport); sk->sk_hash = hash; WARN_ON(!sk_unhashed(sk)); __sk_nulls_add_node_rcu(sk, &head->chain); if (tw) { twrefcnt = inet_twsk_unhash(tw); NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); } spin_unlock(lock); if (twrefcnt) inet_twsk_put(tw); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); if (twp) { *twp = tw; } else if (tw) { /* Silly. Should hash-dance instead... 
*/ inet_twsk_deschedule(tw, death_row); inet_twsk_put(tw); } return 0; not_unique: spin_unlock(lock); return -EADDRNOTAVAIL; } static inline u32 inet_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, inet->inet_daddr, inet->inet_dport); } int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct hlist_nulls_head *list; spinlock_t *lock; struct inet_ehash_bucket *head; int twrefcnt = 0; WARN_ON(!sk_unhashed(sk)); sk->sk_hash = inet_sk_ehashfn(sk); head = inet_ehash_bucket(hashinfo, sk->sk_hash); list = &head->chain; lock = inet_ehash_lockp(hashinfo, sk->sk_hash); spin_lock(lock); __sk_nulls_add_node_rcu(sk, list); if (tw) { WARN_ON(sk->sk_hash != tw->tw_hash); twrefcnt = inet_twsk_unhash(tw); } spin_unlock(lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return twrefcnt; } EXPORT_SYMBOL_GPL(__inet_hash_nolisten); static void __inet_hash(struct sock *sk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct inet_listen_hashbucket *ilb; if (sk->sk_state != TCP_LISTEN) { __inet_hash_nolisten(sk, NULL); return; } WARN_ON(!sk_unhashed(sk)); ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; spin_lock(&ilb->lock); __sk_nulls_add_node_rcu(sk, &ilb->head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); spin_unlock(&ilb->lock); } void inet_hash(struct sock *sk) { if (sk->sk_state != TCP_CLOSE) { local_bh_disable(); __inet_hash(sk); local_bh_enable(); } } EXPORT_SYMBOL_GPL(inet_hash); void inet_unhash(struct sock *sk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; spinlock_t *lock; int done; if (sk_unhashed(sk)) return; if (sk->sk_state == TCP_LISTEN) lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock; else lock = inet_ehash_lockp(hashinfo, sk->sk_hash); spin_lock_bh(lock); done =__sk_nulls_del_node_init_rcu(sk); if (done) 
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_unlock_bh(lock); } EXPORT_SYMBOL_GPL(inet_unhash); int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **), int (*hash)(struct sock *sk, struct inet_timewait_sock *twp)) { struct inet_hashinfo *hinfo = death_row->hashinfo; const unsigned short snum = inet_sk(sk)->inet_num; struct inet_bind_hashbucket *head; struct inet_bind_bucket *tb; int ret; struct net *net = sock_net(sk); int twrefcnt = 1; if (!snum) { int i, remaining, low, high, port; static u32 hint; u32 offset = hint + port_offset; struct hlist_node *node; struct inet_timewait_sock *tw = NULL; inet_get_local_port_range(&low, &high); remaining = (high - low) + 1; local_bh_disable(); for (i = 1; i <= remaining; i++) { port = low + (i + offset) % remaining; if (inet_is_reserved_local_port(port)) continue; head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; spin_lock(&head->lock); /* Does not bother with rcv_saddr checks, * because the established check is already * unique enough. 
*/ inet_bind_bucket_for_each(tb, node, &head->chain) { if (net_eq(ib_net(tb), net) && tb->port == port) { if (tb->fastreuse >= 0) goto next_port; WARN_ON(hlist_empty(&tb->owners)); if (!check_established(death_row, sk, port, &tw)) goto ok; goto next_port; } } tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net, head, port); if (!tb) { spin_unlock(&head->lock); break; } tb->fastreuse = -1; goto ok; next_port: spin_unlock(&head->lock); } local_bh_enable(); return -EADDRNOTAVAIL; ok: hint += i; /* Head lock still held and bh's disabled */ inet_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); twrefcnt += hash(sk, tw); } if (tw) twrefcnt += inet_twsk_bind_unhash(tw, hinfo); spin_unlock(&head->lock); if (tw) { inet_twsk_deschedule(tw, death_row); while (twrefcnt) { twrefcnt--; inet_twsk_put(tw); } } ret = 0; goto out; } head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)]; tb = inet_csk(sk)->icsk_bind_hash; spin_lock_bh(&head->lock); if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { hash(sk, NULL); spin_unlock_bh(&head->lock); return 0; } else { spin_unlock(&head->lock); /* No definite answer... Walk to established hash table */ ret = check_established(death_row, sk, snum, NULL); out: local_bh_enable(); return ret; } } /* * Bind a port for a connect operation and hash it. */ int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk), __inet_check_established, __inet_hash_nolisten); } EXPORT_SYMBOL_GPL(inet_hash_connect); void inet_hashinfo_init(struct inet_hashinfo *h) { int i; atomic_set(&h->bsockets, 0); for (i = 0; i < INET_LHTABLE_SIZE; i++) { spin_lock_init(&h->listening_hash[i].lock); INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head, i + LISTENING_NULLS_BASE); } } EXPORT_SYMBOL_GPL(inet_hashinfo_init);
gpl-2.0
xiaognol/android_kernel_chm_cl00
net/ipv4/udplite.c
7409
3532
/* * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). * * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> * * Changes: * Fixes: * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) "UDPLite: " fmt #include <linux/export.h> #include "udp_impl.h" struct udp_table udplite_table __read_mostly; EXPORT_SYMBOL(udplite_table); static int udplite_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE); } static void udplite_err(struct sk_buff *skb, u32 info) { __udp4_lib_err(skb, info, &udplite_table); } static const struct net_protocol udplite_protocol = { .handler = udplite_rcv, .err_handler = udplite_err, .no_policy = 1, .netns_ok = 1, }; struct proto udplite_prot = { .name = "UDP-Lite", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udplite_sk_init, .destroy = udp_destroy_sock, .setsockopt = udp_setsockopt, .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .sendpage = udp_sendpage, .backlog_rcv = udp_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .get_port = udp_v4_get_port, .obj_size = sizeof(struct udp_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .h.udp_table = &udplite_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, #endif .clear_sk = sk_prot_clear_portaddr_nulls, }; EXPORT_SYMBOL(udplite_prot); static struct inet_protosw udplite4_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_UDPLITE, .prot = &udplite_prot, .ops = &inet_dgram_ops, .no_check = 0, /* must checksum (RFC 3828) */ .flags = INET_PROTOSW_PERMANENT, }; #ifdef CONFIG_PROC_FS static const struct file_operations udplite_afinfo_seq_fops = { .owner = 
THIS_MODULE, .open = udp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct udp_seq_afinfo udplite4_seq_afinfo = { .name = "udplite", .family = AF_INET, .udp_table = &udplite_table, .seq_fops = &udplite_afinfo_seq_fops, .seq_ops = { .show = udp4_seq_show, }, }; static int __net_init udplite4_proc_init_net(struct net *net) { return udp_proc_register(net, &udplite4_seq_afinfo); } static void __net_exit udplite4_proc_exit_net(struct net *net) { udp_proc_unregister(net, &udplite4_seq_afinfo); } static struct pernet_operations udplite4_net_ops = { .init = udplite4_proc_init_net, .exit = udplite4_proc_exit_net, }; static __init int udplite4_proc_init(void) { return register_pernet_subsys(&udplite4_net_ops); } #else static inline int udplite4_proc_init(void) { return 0; } #endif void __init udplite4_register(void) { udp_table_init(&udplite_table, "UDP-Lite"); if (proto_register(&udplite_prot, 1)) goto out_register_err; if (inet_add_protocol(&udplite_protocol, IPPROTO_UDPLITE) < 0) goto out_unregister_proto; inet_register_protosw(&udplite4_protosw); if (udplite4_proc_init()) pr_err("%s: Cannot register /proc!\n", __func__); return; out_unregister_proto: proto_unregister(&udplite_prot); out_register_err: pr_crit("%s: Cannot add UDP-Lite protocol\n", __func__); }
gpl-2.0
nutterpc/demonkernel-I9505-TW
arch/mips/powertv/asic/prealloc-calliope.c
8689
9523
/* * Memory pre-allocations for Calliope boxes. * * Copyright (C) 2005-2009 Scientific-Atlanta, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Author: Ken Eppinett * David Schleef <ds@schleef.org> */ #include <linux/init.h> #include <linux/ioport.h> #include <asm/mach-powertv/asic.h> #include "prealloc.h" /* * NON_DVR_CAPABLE CALLIOPE RESOURCES */ struct resource non_dvr_calliope_resources[] __initdata = { /* * VIDEO / LX1 */ /* Delta-Mu 1 image (2MiB) */ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1, IORESOURCE_MEM) /* Delta-Mu 1 monitor (8KiB) */ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1, IORESOURCE_MEM) /* Delta-Mu 1 RAM (~36.9MiB (32MiB - (2MiB + 8KiB))) */ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1, IORESOURCE_MEM) /* * Sysaudio Driver */ /* DSP code and data images (1MiB) */ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC CPU PCM buffer (40KiB) */ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC AUX buffer (128KiB) */ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC Main buffer (128KiB) */ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1, 
(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * STAVEM driver/STAPI */ /* 6MiB */ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * DOCSIS Subsystem */ /* 7MiB */ PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM) /* * GHW HAL Driver */ /* PowerTV Graphics Heap (14MiB) */ PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1, IORESOURCE_MEM) /* * multi com buffer area */ /* 128KiB */ PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1, IORESOURCE_MEM) /* * DMA Ring buffer (don't need recording buffers) */ /* 680KiB */ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Display bins buffer for unit0 */ /* 4KiB */ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * AVFS: player HAL memory */ /* 945K * 3 for playback */ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * PMEM */ /* Persistent memory for diagnostics (64KiB) */ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Smartcard */ /* Read and write buffers for Internal/External cards (10KiB) */ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * NAND Flash */ /* 10KiB */ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1, IORESOURCE_MEM) /* * Synopsys GMAC Memory Region */ /* 64KiB */ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * TFTPBuffer * * This buffer is used in some minimal configurations (e.g. 
two-way * loader) for storing software images */ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Add other resources here */ /* * End of Resource marker */ { .flags = 0, }, }; struct resource non_dvr_vze_calliope_resources[] __initdata = { /* * VIDEO / LX1 */ /* Delta-Mu 1 image (2MiB) */ PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1, IORESOURCE_MEM) /* Delta-Mu 1 monitor (8KiB) */ PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1, IORESOURCE_MEM) /* Delta-Mu 1 RAM (10.12MiB) */ PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1, IORESOURCE_MEM) /* * Sysaudio Driver */ /* DSP code and data images (1MiB) */ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC CPU PCM buffer (40KiB) */ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC AUX buffer (16KiB) */ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC Main buffer (16KiB) */ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * STAVEM driver/STAPI */ /* 3.125MiB */ PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1, IORESOURCE_MEM) /* * GHW HAL Driver */ /* PowerTV Graphics Heap (2.59MiB) */ PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1, IORESOURCE_MEM) /* * multi com buffer area */ /* 128KiB */ PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1, IORESOURCE_MEM) /* * DMA Ring buffer (don't need recording buffers) */ /* 680KiB */ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Display bins buffer for unit0 */ /* 4KiB */ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * PMEM */ /* Persistent memory for diagnostics (64KiB) */ PREALLOC_PMEM("DiagPersistentMemory", 
0x00000000, 0x10000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Smartcard */ /* Read and write buffers for Internal/External cards (10KiB) */ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * NAND Flash */ /* 10KiB */ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1, IORESOURCE_MEM) /* * Synopsys GMAC Memory Region */ /* 64KiB */ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Add other resources here */ /* * End of Resource marker */ { .flags = 0, }, }; struct resource non_dvr_vzf_calliope_resources[] __initdata = { /* * VIDEO / LX1 */ /* Delta-Mu 1 image (2MiB) */ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1, IORESOURCE_MEM) /* Delta-Mu 1 monitor (8KiB) */ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1, IORESOURCE_MEM) /* Delta-Mu 1 RAM (~19.4 (21.5MiB - (2MiB + 8KiB))) */ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1, IORESOURCE_MEM) /* * Sysaudio Driver */ /* DSP code and data images (1MiB) */ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC CPU PCM buffer (40KiB) */ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC AUX buffer (128KiB) */ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* ADSC Main buffer (128KiB) */ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * STAVEM driver/STAPI */ /* 4.5MiB */ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * GHW HAL Driver */ /* PowerTV Graphics Heap (14MiB) */ PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1, IORESOURCE_MEM) /* * multi com buffer area */ /* 128KiB */ PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1, IORESOURCE_MEM) /* * 
DMA Ring buffer (don't need recording buffers) */ /* 680KiB */ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Display bins buffer for unit0 */ /* 4KiB */ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Display bins buffer for unit1 */ /* 4KiB */ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * AVFS: player HAL memory */ /* 945K * 3 for playback */ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * PMEM */ /* Persistent memory for diagnostics (64KiB) */ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Smartcard */ /* Read and write buffers for Internal/External cards (10KiB) */ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * NAND Flash */ /* 10KiB */ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1, IORESOURCE_MEM) /* * Synopsys GMAC Memory Region */ /* 64KiB */ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1, (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT)) /* * Add other resources here */ /* * End of Resource marker */ { .flags = 0, }, };
gpl-2.0
parheliamm/SCH-i939_Kernel
net/802/p8023.c
14065
1687
/* * NET3: 802.3 data link hooks used for IPX 802.3 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * 802.3 isn't really a protocol data link layer. Some old IPX stuff * uses it however. Note that there is only one 802.3 protocol layer * in the system. We don't currently support different protocols * running raw 802.3 on different devices. Thankfully nobody else * has done anything like the old IPX. */ #include <linux/in.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/datalink.h> #include <net/p8022.h> /* * Place an 802.3 header on a packet. The driver will do the mac * addresses, we just need to give it the buffer length. */ static int p8023_request(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) { struct net_device *dev = skb->dev; dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); return dev_queue_xmit(skb); } /* * Create an 802.3 client. Note there can be only one 802.3 client */ struct datalink_proto *make_8023_client(void) { struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC); if (proto) { proto->header_length = 0; proto->request = p8023_request; } return proto; } /* * Destroy the 802.3 client. */ void destroy_8023_client(struct datalink_proto *dl) { kfree(dl); } EXPORT_SYMBOL(destroy_8023_client); EXPORT_SYMBOL(make_8023_client); MODULE_LICENSE("GPL");
gpl-2.0
ambikadash/linux-fqt
arch/x86/xen/p2m.c
242
35903
/* * Xen leaves the responsibility for maintaining p2m mappings to the * guests themselves, but it must also access and update the p2m array * during suspend/resume when all the pages are reallocated. * * The p2m table is logically a flat array, but we implement it as a * three-level tree to allow the address space to be sparse. * * Xen * | * p2m_top p2m_top_mfn * / \ / \ * p2m_mid p2m_mid p2m_mid_mfn p2m_mid_mfn * / \ / \ / / * p2m p2m p2m p2m p2m p2m p2m ... * * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. * * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the * maximum representable pseudo-physical address space is: * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages * * P2M_PER_PAGE depends on the architecture, as a mfn is always * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to * 512 and 1024 entries respectively. * * In short, these structures contain the Machine Frame Number (MFN) of the PFN. * * However not all entries are filled with MFNs. Specifically for all other * leaf entries, or for the top root, or middle one, for which there is a void * entry, we assume it is "missing". So (for example) * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. * * We also have the possibility of setting 1-1 mappings on certain regions, so * that: * pfn_to_mfn(0xc0000)=0xc0000 * * The benefit of this is, that we can assume for non-RAM regions (think * PCI BARs, or ACPI spaces), we can create mappings easily b/c we * get the PFN value to match the MFN. * * For this to work efficiently we have one new page p2m_identity and * allocate (via reserved_brk) any other pages we need to cover the sides * (1GB or 4MB boundary violations). All entries in p2m_identity are set to * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs, * no other fancy value). * * On lookup we spot that the entry points to p2m_identity and return the * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. 
* If the entry points to an allocated page, we just proceed as before and * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in * appropriate functions (pfn_to_mfn). * * The reason for having the IDENTITY_FRAME_BIT instead of just returning the * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a * non-identity pfn. To protect ourselves against we elect to set (and get) the * IDENTITY_FRAME_BIT on all identity mapped PFNs. * * This simplistic diagram is used to explain the more subtle piece of code. * There is also a digram of the P2M at the end that can help. * Imagine your E820 looking as so: * * 1GB 2GB * /-------------------+---------\/----\ /----------\ /---+-----\ * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | * \-------------------+---------/\----/ \----------/ \---+-----/ * ^- 1029MB ^- 2001MB * * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), * 2048MB = 524288 (0x80000)] * * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB * is actually not present (would have to kick the balloon driver to put it in). * * When we are told to set the PFNs for identity mapping (see patch: "xen/setup: * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start * of the PFN and the end PFN (263424 and 512256 respectively). The first step * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page * covers 512^2 of page estate (1GB) and in case the start or end PFN is not * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn * to end pfn. We reserve_brk top leaf pages if they are missing (means they * point to p2m_mid_missing). * * With the E820 example above, 263424 is not 1GB aligned so we allocate a * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. * Each entry in the allocate page is "missing" (points to p2m_missing). 
* * Next stage is to determine if we need to do a more granular boundary check * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. * We check if the start pfn and end pfn violate that boundary check, and if * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer * granularity of setting which PFNs are missing and which ones are identity. * In our example 263424 and 512256 both fail the check so we reserve_brk two * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" * values) and assign them to p2m[1][2] and p2m[1][488] respectively. * * At this point we would at minimum reserve_brk one page, but could be up to * three. Each call to set_phys_range_identity has at maximum a three page * cost. If we were to query the P2M at this stage, all those entries from * start PFN through end PFN (so 1029MB -> 2001MB) would return * INVALID_P2M_ENTRY ("missing"). * * The next step is to walk from the start pfn to the end pfn setting * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. * If we find that the middle leaf is pointing to p2m_missing we can swap it * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this * point we do not need to worry about boundary aligment (so no need to * reserve_brk a middle page, figure out which PFNs are "missing" and which * ones are identity), as that has been done earlier. If we find that the * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference * that page (which covers 512 PFNs) and set the appropriate PFN with * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we * set from p2m[1][2][256->511] and p2m[1][488][0->256] with * IDENTITY_FRAME_BIT set. * * All other regions that are void (or not filled) either point to p2m_missing * (considered missing) or have the default value of INVALID_P2M_ENTRY (also * considered missing). 
In our case, p2m[1][2][0->255] and p2m[1][488][257->511] * contain the INVALID_P2M_ENTRY value and are considered "missing." * * This is what the p2m ends up looking (for the E820 above) with this * fabulous drawing: * * p2m /--------------\ * /-----\ | &mfn_list[0],| /-----------------\ * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. | * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] | * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] | * |-----| \ | [p2m_identity]+\\ | .... | * | 2 |--\ \-------------------->| ... | \\ \----------------/ * |-----| \ \---------------/ \\ * | 3 |\ \ \\ p2m_identity * |-----| \ \-------------------->/---------------\ /-----------------\ * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | * \-----/ / | [p2m_identity]+-->| ..., ~0 | * / /---------------\ | .... | \-----------------/ * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | * / | IDENTITY[@256]|<----/ \---------------/ * / | ~0, ~0, .... | * | \---------------/ * | * p2m_mid_missing p2m_missing * /-----------------\ /------------\ * | [p2m_missing] +---->| ~0, ~0, ~0 | * | [p2m_missing] +---->| ..., ~0 | * \-----------------/ \------------/ * * where ~0 is INVALID_P2M_ENTRY. 
IDENTITY is (PFN | IDENTITY_BIT) */ #include <linux/init.h> #include <linux/module.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <asm/cache.h> #include <asm/setup.h> #include <asm/xen/page.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <xen/balloon.h> #include <xen/grant_table.h> #include "multicalls.h" #include "xen-ops.h" static void __init m2p_override_init(void); unsigned long xen_max_p2m_pfn __read_mostly; #define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) #define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *)) #define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **)) #define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) /* Placeholders for holes in the address space */ static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); /* We might hit two boundary violations at the start and end, at max each * boundary violation will require three middle nodes. */ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); /* When we populate back during bootup, the amount of pages can vary. The * max we have is seen is 395979, but that does not mean it can't be more. * Some machines can have 3GB I/O holes even. 
With early_can_reuse_p2m_middle * it can re-use Xen provided mfn_list array, so we only need to allocate at * most three P2M top nodes. */ RESERVE_BRK(p2m_populated, PAGE_SIZE * 3); static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); } static inline unsigned p2m_mid_index(unsigned long pfn) { return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; } static inline unsigned p2m_index(unsigned long pfn) { return pfn % P2M_PER_PAGE; } static void p2m_top_init(unsigned long ***top) { unsigned i; for (i = 0; i < P2M_TOP_PER_PAGE; i++) top[i] = p2m_mid_missing; } static void p2m_top_mfn_init(unsigned long *top) { unsigned i; for (i = 0; i < P2M_TOP_PER_PAGE; i++) top[i] = virt_to_mfn(p2m_mid_missing_mfn); } static void p2m_top_mfn_p_init(unsigned long **top) { unsigned i; for (i = 0; i < P2M_TOP_PER_PAGE; i++) top[i] = p2m_mid_missing_mfn; } static void p2m_mid_init(unsigned long **mid) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) mid[i] = p2m_missing; } static void p2m_mid_mfn_init(unsigned long *mid) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) mid[i] = virt_to_mfn(p2m_missing); } static void p2m_init(unsigned long *p2m) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) p2m[i] = INVALID_P2M_ENTRY; } /* * Build the parallel p2m_top_mfn and p2m_mid_mfn structures * * This is called both at boot time, and after resuming from suspend: * - At boot time we're called very early, and must use extend_brk() * to allocate memory. * * - After resume we're called from within stop_machine, but the mfn * tree should alreay be completely allocated. 
*/ void __ref xen_build_mfn_list_list(void) { unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return; /* Pre-initialize p2m_top_mfn to be completely missing */ if (p2m_top_mfn == NULL) { p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_mfn_init(p2m_mid_missing_mfn); p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_p_init(p2m_top_mfn_p); p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_init(p2m_top_mfn); } else { /* Reinitialise, mfn's all change after migration */ p2m_mid_mfn_init(p2m_mid_missing_mfn); } for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); unsigned mididx = p2m_mid_index(pfn); unsigned long **mid; unsigned long *mid_mfn_p; mid = p2m_top[topidx]; mid_mfn_p = p2m_top_mfn_p[topidx]; /* Don't bother allocating any mfn mid levels if * they're just missing, just update the stored mfn, * since all could have changed over a migrate. */ if (mid == p2m_mid_missing) { BUG_ON(mididx); BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; continue; } if (mid_mfn_p == p2m_mid_missing_mfn) { /* * XXX boot-time only! We should never find * missing parts of the mfn tree after * runtime. extend_brk() will BUG if we call * it too late. 
*/ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_mfn_init(mid_mfn_p); p2m_top_mfn_p[topidx] = mid_mfn_p; } p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]); } } void xen_setup_mfn_list_list(void) { if (xen_feature(XENFEAT_auto_translated_physmap)) return; BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = virt_to_mfn(p2m_top_mfn); HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; } /* Set up p2m_top to point to the domain-builder provided p2m pages */ void __init xen_build_dynamic_phys_to_machine(void) { unsigned long *mfn_list; unsigned long max_pfn; unsigned long pfn; if (xen_feature(XENFEAT_auto_translated_physmap)) return; mfn_list = (unsigned long *)xen_start_info->mfn_list; max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); xen_max_p2m_pfn = max_pfn; p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_init(p2m_missing); p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(p2m_mid_missing); p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_init(p2m_top); p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_init(p2m_identity); /* * The domain builder gives us a pre-constructed p2m array in * mfn_list for all the pages initially given to us, so we just * need to graft that into our tree structure. */ for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); unsigned mididx = p2m_mid_index(pfn); if (p2m_top[topidx] == p2m_mid_missing) { unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(mid); p2m_top[topidx] = mid; } /* * As long as the mfn_list has enough entries to completely * fill a p2m page, pointing into the array is ok. But if * not the entries beyond the last pfn will be undefined. 
*/ if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) { unsigned long p2midx; p2midx = max_pfn % P2M_PER_PAGE; for ( ; p2midx < P2M_PER_PAGE; p2midx++) mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY; } p2m_top[topidx][mididx] = &mfn_list[pfn]; } m2p_override_init(); } #ifdef CONFIG_X86_64 #include <linux/bootmem.h> unsigned long __init xen_revector_p2m_tree(void) { unsigned long va_start; unsigned long va_end; unsigned long pfn; unsigned long pfn_free = 0; unsigned long *mfn_list = NULL; unsigned long size; va_start = xen_start_info->mfn_list; /*We copy in increments of P2M_PER_PAGE * sizeof(unsigned long), * so make sure it is rounded up to that */ size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); va_end = va_start + size; /* If we were revectored already, don't do it again. */ if (va_start <= __START_KERNEL_map && va_start >= __PAGE_OFFSET) return 0; mfn_list = alloc_bootmem_align(size, PAGE_SIZE); if (!mfn_list) { pr_warn("Could not allocate space for a new P2M tree!\n"); return xen_start_info->mfn_list; } /* Fill it out with INVALID_P2M_ENTRY value */ memset(mfn_list, 0xFF, size); for (pfn = 0; pfn < ALIGN(MAX_DOMAIN_PAGES, P2M_PER_PAGE); pfn += P2M_PER_PAGE) { unsigned topidx = p2m_top_index(pfn); unsigned mididx; unsigned long *mid_p; if (!p2m_top[topidx]) continue; if (p2m_top[topidx] == p2m_mid_missing) continue; mididx = p2m_mid_index(pfn); mid_p = p2m_top[topidx][mididx]; if (!mid_p) continue; if ((mid_p == p2m_missing) || (mid_p == p2m_identity)) continue; if ((unsigned long)mid_p == INVALID_P2M_ENTRY) continue; /* The old va. 
Rebase it on mfn_list */ if (mid_p >= (unsigned long *)va_start && mid_p <= (unsigned long *)va_end) { unsigned long *new; if (pfn_free > (size / sizeof(unsigned long))) { WARN(1, "Only allocated for %ld pages, but we want %ld!\n", size / sizeof(unsigned long), pfn_free); return 0; } new = &mfn_list[pfn_free]; copy_page(new, mid_p); p2m_top[topidx][mididx] = &mfn_list[pfn_free]; p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn_free]); pfn_free += P2M_PER_PAGE; } /* This should be the leafs allocated for identity from _brk. */ } return (unsigned long)mfn_list; } #else unsigned long __init xen_revector_p2m_tree(void) { return 0; } #endif unsigned long get_phys_to_machine(unsigned long pfn) { unsigned topidx, mididx, idx; if (unlikely(pfn >= MAX_P2M_PFN)) return INVALID_P2M_ENTRY; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); /* * The INVALID_P2M_ENTRY is filled in both p2m_*identity * and in p2m_*missing, so returning the INVALID_P2M_ENTRY * would be wrong. */ if (p2m_top[topidx][mididx] == p2m_identity) return IDENTITY_FRAME(pfn); return p2m_top[topidx][mididx][idx]; } EXPORT_SYMBOL_GPL(get_phys_to_machine); static void *alloc_p2m_page(void) { return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); } static void free_p2m_page(void *p) { free_page((unsigned long)p); } /* * Fully allocate the p2m structure for a given pfn. We need to check * that both the top and mid levels are allocated, and make sure the * parallel mfn tree is kept in sync. We may race with other cpus, so * the new pages are installed with cmpxchg; if we lose the race then * simply free the page we allocated and use the one that's there. 
*/ static bool alloc_p2m(unsigned long pfn) { unsigned topidx, mididx; unsigned long ***top_p, **mid; unsigned long *top_mfn_p, *mid_mfn; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); top_p = &p2m_top[topidx]; mid = *top_p; if (mid == p2m_mid_missing) { /* Mid level is missing, allocate a new one */ mid = alloc_p2m_page(); if (!mid) return false; p2m_mid_init(mid); if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) free_p2m_page(mid); } top_mfn_p = &p2m_top_mfn[topidx]; mid_mfn = p2m_top_mfn_p[topidx]; BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); if (mid_mfn == p2m_mid_missing_mfn) { /* Separately check the mid mfn level */ unsigned long missing_mfn; unsigned long mid_mfn_mfn; mid_mfn = alloc_p2m_page(); if (!mid_mfn) return false; p2m_mid_mfn_init(mid_mfn); missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); mid_mfn_mfn = virt_to_mfn(mid_mfn); if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) free_p2m_page(mid_mfn); else p2m_top_mfn_p[topidx] = mid_mfn; } if (p2m_top[topidx][mididx] == p2m_identity || p2m_top[topidx][mididx] == p2m_missing) { /* p2m leaf page is missing */ unsigned long *p2m; unsigned long *p2m_orig = p2m_top[topidx][mididx]; p2m = alloc_p2m_page(); if (!p2m) return false; p2m_init(p2m); if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig) free_p2m_page(p2m); else mid_mfn[mididx] = virt_to_mfn(p2m); } return true; } static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary) { unsigned topidx, mididx, idx; unsigned long *p2m; unsigned long *mid_mfn_p; topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); /* Pfff.. No boundary cross-over, lets get out. */ if (!idx && check_boundary) return false; WARN(p2m_top[topidx][mididx] == p2m_identity, "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n", topidx, mididx); /* * Could be done by xen_build_dynamic_phys_to_machine.. 
*/ if (p2m_top[topidx][mididx] != p2m_missing) return false; /* Boundary cross-over for the edges: */ p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_init(p2m); p2m_top[topidx][mididx] = p2m; /* For save/restore we need to MFN of the P2M saved */ mid_mfn_p = p2m_top_mfn_p[topidx]; WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", topidx, mididx); mid_mfn_p[mididx] = virt_to_mfn(p2m); return true; } static bool __init early_alloc_p2m(unsigned long pfn) { unsigned topidx = p2m_top_index(pfn); unsigned long *mid_mfn_p; unsigned long **mid; mid = p2m_top[topidx]; mid_mfn_p = p2m_top_mfn_p[topidx]; if (mid == p2m_mid_missing) { mid = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_init(mid); p2m_top[topidx] = mid; BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); } /* And the save/restore P2M tables.. */ if (mid_mfn_p == p2m_mid_missing_mfn) { mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_mfn_init(mid_mfn_p); p2m_top_mfn_p[topidx] = mid_mfn_p; p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); /* Note: we don't set mid_mfn_p[midix] here, * look in early_alloc_p2m_middle */ } return true; } /* * Skim over the P2M tree looking at pages that are either filled with * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and * replace the P2M leaf with a p2m_missing or p2m_identity. * Stick the old page in the new P2M tree location. 
*/ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) { unsigned topidx; unsigned mididx; unsigned ident_pfns; unsigned inv_pfns; unsigned long *p2m; unsigned long *mid_mfn_p; unsigned idx; unsigned long pfn; /* We only look when this entails a P2M middle layer */ if (p2m_index(set_pfn)) return false; for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { topidx = p2m_top_index(pfn); if (!p2m_top[topidx]) continue; if (p2m_top[topidx] == p2m_mid_missing) continue; mididx = p2m_mid_index(pfn); p2m = p2m_top[topidx][mididx]; if (!p2m) continue; if ((p2m == p2m_missing) || (p2m == p2m_identity)) continue; if ((unsigned long)p2m == INVALID_P2M_ENTRY) continue; ident_pfns = 0; inv_pfns = 0; for (idx = 0; idx < P2M_PER_PAGE; idx++) { /* IDENTITY_PFNs are 1:1 */ if (p2m[idx] == IDENTITY_FRAME(pfn + idx)) ident_pfns++; else if (p2m[idx] == INVALID_P2M_ENTRY) inv_pfns++; else break; } if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE)) goto found; } return false; found: /* Found one, replace old with p2m_identity or p2m_missing */ p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); /* And the other for save/restore.. */ mid_mfn_p = p2m_top_mfn_p[topidx]; /* NOTE: Even if it is a p2m_identity it should still be point to * a page filled with INVALID_P2M_ENTRY entries. */ mid_mfn_p[mididx] = virt_to_mfn(p2m_missing); /* Reset where we want to stick the old page in. 
*/ topidx = p2m_top_index(set_pfn); mididx = p2m_mid_index(set_pfn); /* This shouldn't happen */ if (WARN_ON(p2m_top[topidx] == p2m_mid_missing)) early_alloc_p2m(set_pfn); if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing)) return false; p2m_init(p2m); p2m_top[topidx][mididx] = p2m; mid_mfn_p = p2m_top_mfn_p[topidx]; mid_mfn_p[mididx] = virt_to_mfn(p2m); return true; } bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (unlikely(!__set_phys_to_machine(pfn, mfn))) { if (!early_alloc_p2m(pfn)) return false; if (early_can_reuse_p2m_middle(pfn, mfn)) return __set_phys_to_machine(pfn, mfn); if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) return false; if (!__set_phys_to_machine(pfn, mfn)) return false; } return true; } unsigned long __init set_phys_range_identity(unsigned long pfn_s, unsigned long pfn_e) { unsigned long pfn; if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN)) return 0; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return pfn_e - pfn_s; if (pfn_s > pfn_e) return 0; for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1)); pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) { WARN_ON(!early_alloc_p2m(pfn)); } early_alloc_p2m_middle(pfn_s, true); early_alloc_p2m_middle(pfn_e, true); for (pfn = pfn_s; pfn < pfn_e; pfn++) if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) break; if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s), "Identity mapping failed. 
We are %ld short of 1-1 mappings!\n", (pfn_e - pfn_s) - (pfn - pfn_s))) printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn); return pfn - pfn_s; } /* Try to install p2m mapping; fail if intermediate bits missing */ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) { unsigned topidx, mididx, idx; /* don't track P2M changes in autotranslate guests */ if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return true; if (unlikely(pfn >= MAX_P2M_PFN)) { BUG_ON(mfn != INVALID_P2M_ENTRY); return true; } topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); idx = p2m_index(pfn); /* For sparse holes were the p2m leaf has real PFN along with * PCI holes, stick in the PFN as the MFN value. */ if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { if (p2m_top[topidx][mididx] == p2m_identity) return true; /* Swap over from MISSING to IDENTITY if needed. */ if (p2m_top[topidx][mididx] == p2m_missing) { WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing, p2m_identity) != p2m_missing); return true; } } if (p2m_top[topidx][mididx] == p2m_missing) return mfn == INVALID_P2M_ENTRY; p2m_top[topidx][mididx][idx] = mfn; return true; } bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (unlikely(!__set_phys_to_machine(pfn, mfn))) { if (!alloc_p2m(pfn)) return false; if (!__set_phys_to_machine(pfn, mfn)) return false; } return true; } #define M2P_OVERRIDE_HASH_SHIFT 10 #define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT) static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH); static DEFINE_SPINLOCK(m2p_override_lock); static void __init m2p_override_init(void) { unsigned i; m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH, sizeof(unsigned long)); for (i = 0; i < M2P_OVERRIDE_HASH; i++) INIT_LIST_HEAD(&m2p_overrides[i]); } static unsigned long mfn_hash(unsigned long mfn) { return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT); } /* Add an MFN override for a particular page */ int 
m2p_add_override(unsigned long mfn, struct page *page, struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; unsigned long pfn; unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; pfn = page_to_pfn(page); if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); ptep = lookup_address(address, &level); if (WARN(ptep == NULL || level != PG_LEVEL_4K, "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; } WARN_ON(PagePrivate(page)); SetPagePrivate(page); set_page_private(page, mfn); page->index = pfn_to_mfn(pfn); if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) return -ENOMEM; if (kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs = xen_mc_entry(sizeof(*kmap_op)); MULTI_grant_table_op(mcs.mc, GNTTABOP_map_grant_ref, kmap_op, 1); xen_mc_issue(PARAVIRT_LAZY_MMU); } } spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); spin_unlock_irqrestore(&m2p_override_lock, flags); /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other * pfn so that the following mfn_to_pfn(mfn) calls will return the * pfn from the m2p_override (the backend pfn) instead. * We need to do this because the pages shared by the frontend * (xen-blkfront) can be already locked (lock_page, called by * do_read_cache_page); when the userspace backend tries to use them * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so * do_blockdev_direct_IO is going to try to lock the same pages * again resulting in a deadlock. * As a side effect get_user_pages_fast might not be safe on the * frontend pages while they are being shared with the backend, * because mfn_to_pfn (that ends up being called by GUPF) will * return the backend pfn rather than the frontend pfn. 
*/ pfn = mfn_to_pfn_no_overrides(mfn); if (get_phys_to_machine(pfn) == mfn) set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); return 0; } EXPORT_SYMBOL_GPL(m2p_add_override); int m2p_remove_override(struct page *page, struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; unsigned long mfn; unsigned long pfn; unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; pfn = page_to_pfn(page); mfn = get_phys_to_machine(pfn); if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) return -EINVAL; if (!PageHighMem(page)) { address = (unsigned long)__va(pfn << PAGE_SHIFT); ptep = lookup_address(address, &level); if (WARN(ptep == NULL || level != PG_LEVEL_4K, "m2p_remove_override: pfn %lx not mapped", pfn)) return -EINVAL; } spin_lock_irqsave(&m2p_override_lock, flags); list_del(&page->lru); spin_unlock_irqrestore(&m2p_override_lock, flags); WARN_ON(!PagePrivate(page)); ClearPagePrivate(page); set_phys_to_machine(pfn, page->index); if (kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs; struct gnttab_unmap_and_replace *unmap_op; struct page *scratch_page = get_balloon_scratch_page(); unsigned long scratch_page_address = (unsigned long) __va(page_to_pfn(scratch_page) << PAGE_SHIFT); /* * It might be that we queued all the m2p grant table * hypercalls in a multicall, then m2p_remove_override * get called before the multicall has actually been * issued. In this case handle is going to -1 because * it hasn't been modified yet. */ if (kmap_op->handle == -1) xen_mc_flush(); /* * Now if kmap_op->handle is negative it means that the * hypercall actually returned an error. 
*/ if (kmap_op->handle == GNTST_general_error) { printk(KERN_WARNING "m2p_remove_override: " "pfn %lx mfn %lx, failed to modify kernel mappings", pfn, mfn); put_balloon_scratch_page(); return -1; } xen_mc_batch(); mcs = __xen_mc_entry( sizeof(struct gnttab_unmap_and_replace)); unmap_op = mcs.args; unmap_op->host_addr = kmap_op->host_addr; unmap_op->new_addr = scratch_page_address; unmap_op->handle = kmap_op->handle; MULTI_grant_table_op(mcs.mc, GNTTABOP_unmap_and_replace, unmap_op, 1); mcs = __xen_mc_entry(0); MULTI_update_va_mapping(mcs.mc, scratch_page_address, pfn_pte(page_to_pfn(scratch_page), PAGE_KERNEL_RO), 0); xen_mc_issue(PARAVIRT_LAZY_MMU); kmap_op->host_addr = 0; put_balloon_scratch_page(); } } /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present * somewhere in this domain, even before being added to the * m2p_override (see comment above in m2p_add_override). * If there are no other entries in the m2p_override corresponding * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for * the original pfn (the one shared by the frontend): the backend * cannot do any IO on this page anymore because it has been * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of * the original pfn causes mfn_to_pfn(mfn) to return the frontend * pfn again. 
*/ mfn &= ~FOREIGN_FRAME_BIT; pfn = mfn_to_pfn_no_overrides(mfn); if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && m2p_find_override(mfn) == NULL) set_phys_to_machine(pfn, mfn); return 0; } EXPORT_SYMBOL_GPL(m2p_remove_override); struct page *m2p_find_override(unsigned long mfn) { unsigned long flags; struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)]; struct page *p, *ret; ret = NULL; spin_lock_irqsave(&m2p_override_lock, flags); list_for_each_entry(p, bucket, lru) { if (page_private(p) == mfn) { ret = p; break; } } spin_unlock_irqrestore(&m2p_override_lock, flags); return ret; } unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) { struct page *p = m2p_find_override(mfn); unsigned long ret = pfn; if (p) ret = page_to_pfn(p); return ret; } EXPORT_SYMBOL_GPL(m2p_find_override_pfn); #ifdef CONFIG_XEN_DEBUG_FS #include <linux/debugfs.h> #include "debugfs.h" static int p2m_dump_show(struct seq_file *m, void *v) { static const char * const level_name[] = { "top", "middle", "entry", "abnormal", "error"}; #define TYPE_IDENTITY 0 #define TYPE_MISSING 1 #define TYPE_PFN 2 #define TYPE_UNKNOWN 3 static const char * const type_name[] = { [TYPE_IDENTITY] = "identity", [TYPE_MISSING] = "missing", [TYPE_PFN] = "pfn", [TYPE_UNKNOWN] = "abnormal"}; unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; unsigned int uninitialized_var(prev_level); unsigned int uninitialized_var(prev_type); if (!p2m_top) return 0; for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) { unsigned topidx = p2m_top_index(pfn); unsigned mididx = p2m_mid_index(pfn); unsigned idx = p2m_index(pfn); unsigned lvl, type; lvl = 4; type = TYPE_UNKNOWN; if (p2m_top[topidx] == p2m_mid_missing) { lvl = 0; type = TYPE_MISSING; } else if (p2m_top[topidx] == NULL) { lvl = 0; type = TYPE_UNKNOWN; } else if (p2m_top[topidx][mididx] == NULL) { lvl = 1; type = TYPE_UNKNOWN; } else if (p2m_top[topidx][mididx] == p2m_identity) { lvl = 1; type = TYPE_IDENTITY; } else if (p2m_top[topidx][mididx] == 
p2m_missing) { lvl = 1; type = TYPE_MISSING; } else if (p2m_top[topidx][mididx][idx] == 0) { lvl = 2; type = TYPE_UNKNOWN; } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) { lvl = 2; type = TYPE_IDENTITY; } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) { lvl = 2; type = TYPE_MISSING; } else if (p2m_top[topidx][mididx][idx] == pfn) { lvl = 2; type = TYPE_PFN; } else if (p2m_top[topidx][mididx][idx] != pfn) { lvl = 2; type = TYPE_PFN; } if (pfn == 0) { prev_level = lvl; prev_type = type; } if (pfn == MAX_DOMAIN_PAGES-1) { lvl = 3; type = TYPE_UNKNOWN; } if (prev_type != type) { seq_printf(m, " [0x%lx->0x%lx] %s\n", prev_pfn_type, pfn, type_name[prev_type]); prev_pfn_type = pfn; prev_type = type; } if (prev_level != lvl) { seq_printf(m, " [0x%lx->0x%lx] level %s\n", prev_pfn_level, pfn, level_name[prev_level]); prev_pfn_level = pfn; prev_level = lvl; } } return 0; #undef TYPE_IDENTITY #undef TYPE_MISSING #undef TYPE_PFN #undef TYPE_UNKNOWN } static int p2m_dump_open(struct inode *inode, struct file *filp) { return single_open(filp, p2m_dump_show, NULL); } static const struct file_operations p2m_dump_fops = { .open = p2m_dump_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *d_mmu_debug; static int __init xen_p2m_debugfs(void) { struct dentry *d_xen = xen_init_debugfs(); if (d_xen == NULL) return -ENOMEM; d_mmu_debug = debugfs_create_dir("mmu", d_xen); debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); return 0; } fs_initcall(xen_p2m_debugfs); #endif /* CONFIG_XEN_DEBUG_FS */
gpl-2.0
getitnowmarketing/mecha_2.6.32
fs/gfs2/meta_io.c
498
11204
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/swap.h> #include <linux/delay.h> #include <linux/bio.h> #include <linux/gfs2_ondisk.h> #include "gfs2.h" #include "incore.h" #include "glock.h" #include "glops.h" #include "inode.h" #include "log.h" #include "lops.h" #include "meta_io.h" #include "rgrp.h" #include "trans.h" #include "util.h" static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc) { int err; struct buffer_head *bh, *head; int nr_underway = 0; int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE)); BUG_ON(!PageLocked(page)); BUG_ON(!page_has_buffers(page)); head = page_buffers(page); bh = head; do { if (!buffer_mapped(bh)) continue; /* * If it's a fully non-blocking write attempt and we cannot * lock the buffer then redirty the page. Note that this can * potentially cause a busy-wait loop from pdflush and kswapd * activity, but those code paths have their own higher-level * throttling. */ if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { lock_buffer(bh); } else if (!trylock_buffer(bh)) { redirty_page_for_writepage(wbc, page); continue; } if (test_clear_buffer_dirty(bh)) { mark_buffer_async_write(bh); } else { unlock_buffer(bh); } } while ((bh = bh->b_this_page) != head); /* * The page and its buffers are protected by PageWriteback(), so we can * drop the bh refcounts early. 
*/ BUG_ON(PageWriteback(page)); set_page_writeback(page); do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { submit_bh(write_op, bh); nr_underway++; } bh = next; } while (bh != head); unlock_page(page); err = 0; if (nr_underway == 0) end_page_writeback(page); return err; } static const struct address_space_operations aspace_aops = { .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, .sync_page = block_sync_page, }; /** * gfs2_aspace_get - Create and initialize a struct inode structure * @sdp: the filesystem the aspace is in * * Right now a struct inode is just a struct inode. Maybe Linux * will supply a more lightweight address space construct (that works) * in the future. * * Make sure pages/buffers in this aspace aren't in high memory. * * Returns: the aspace */ struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp) { struct inode *aspace; struct gfs2_inode *ip; aspace = new_inode(sdp->sd_vfs); if (aspace) { mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS); aspace->i_mapping->a_ops = &aspace_aops; aspace->i_size = ~0ULL; ip = GFS2_I(aspace); clear_bit(GIF_USER, &ip->i_flags); insert_inode_hash(aspace); } return aspace; } void gfs2_aspace_put(struct inode *aspace) { remove_inode_hash(aspace); iput(aspace); } /** * gfs2_meta_sync - Sync all buffers associated with a glock * @gl: The glock * */ void gfs2_meta_sync(struct gfs2_glock *gl) { struct address_space *mapping = gl->gl_aspace->i_mapping; int error; filemap_fdatawrite(mapping); error = filemap_fdatawait(mapping); if (error) gfs2_io_error(gl->gl_sbd); } /** * gfs2_getbuf - Get a buffer with a given address space * @gl: the glock * @blkno: the block number (filesystem scope) * @create: 1 if the buffer should be created * * Returns: the buffer */ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) { struct address_space *mapping = gl->gl_aspace->i_mapping; struct gfs2_sbd *sdp = gl->gl_sbd; struct page *page; struct buffer_head *bh; unsigned 
int shift; unsigned long index; unsigned int bufnum; shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift; index = blkno >> shift; /* convert block to page */ bufnum = blkno - (index << shift); /* block buf index within page */ if (create) { for (;;) { page = grab_cache_page(mapping, index); if (page) break; yield(); } } else { page = find_lock_page(mapping, index); if (!page) return NULL; } if (!page_has_buffers(page)) create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0); /* Locate header for our buffer within our page */ for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page) /* Do nothing */; get_bh(bh); if (!buffer_mapped(bh)) map_bh(bh, sdp->sd_vfs, blkno); unlock_page(page); mark_page_accessed(page); page_cache_release(page); return bh; } static void meta_prep_new(struct buffer_head *bh) { struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data; lock_buffer(bh); clear_buffer_dirty(bh); set_buffer_uptodate(bh); unlock_buffer(bh); mh->mh_magic = cpu_to_be32(GFS2_MAGIC); } /** * gfs2_meta_new - Get a block * @gl: The glock associated with this block * @blkno: The block number * * Returns: The buffer */ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) { struct buffer_head *bh; bh = gfs2_getbuf(gl, blkno, CREATE); meta_prep_new(bh); return bh; } /** * gfs2_meta_read - Read a block from disk * @gl: The glock covering the block * @blkno: The block number * @flags: flags * @bhp: the place where the buffer is returned (NULL on failure) * * Returns: errno */ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, struct buffer_head **bhp) { struct gfs2_sbd *sdp = gl->gl_sbd; struct buffer_head *bh; if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) return -EIO; *bhp = bh = gfs2_getbuf(gl, blkno, CREATE); lock_buffer(bh); if (buffer_uptodate(bh)) { unlock_buffer(bh); return 0; } bh->b_end_io = end_buffer_read_sync; get_bh(bh); submit_bh(READ_SYNC | (1 << BIO_RW_META), bh); if (!(flags & DIO_WAIT)) return 0; 
wait_on_buffer(bh); if (unlikely(!buffer_uptodate(bh))) { struct gfs2_trans *tr = current->journal_info; if (tr && tr->tr_touched) gfs2_io_error_bh(sdp, bh); brelse(bh); return -EIO; } return 0; } /** * gfs2_meta_wait - Reread a block from disk * @sdp: the filesystem * @bh: The block to wait for * * Returns: errno */ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh) { if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) return -EIO; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { struct gfs2_trans *tr = current->journal_info; if (tr && tr->tr_touched) gfs2_io_error_bh(sdp, bh); return -EIO; } if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) return -EIO; return 0; } /** * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer * @gl: the glock the buffer belongs to * @bh: The buffer to be attached to * @meta: Flag to indicate whether its metadata or not */ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, int meta) { struct gfs2_bufdata *bd; if (meta) lock_page(bh->b_page); if (bh->b_private) { if (meta) unlock_page(bh->b_page); return; } bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL); bd->bd_bh = bh; bd->bd_gl = gl; INIT_LIST_HEAD(&bd->bd_list_tr); if (meta) lops_init_le(&bd->bd_le, &gfs2_buf_lops); else lops_init_le(&bd->bd_le, &gfs2_databuf_lops); bh->b_private = bd; if (meta) unlock_page(bh->b_page); } void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta) { struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host); struct gfs2_bufdata *bd = bh->b_private; if (test_clear_buffer_pinned(bh)) { list_del_init(&bd->bd_le.le_list); if (meta) { gfs2_assert_warn(sdp, sdp->sd_log_num_buf); sdp->sd_log_num_buf--; tr->tr_num_buf_rm++; } else { gfs2_assert_warn(sdp, sdp->sd_log_num_databuf); sdp->sd_log_num_databuf--; tr->tr_num_databuf_rm++; } tr->tr_touched = 1; brelse(bh); } if (bd) { if (bd->bd_ail) { gfs2_remove_from_ail(bd); bh->b_private = NULL; bd->bd_bh 
= NULL; bd->bd_blkno = bh->b_blocknr; gfs2_trans_add_revoke(sdp, bd); } } clear_buffer_dirty(bh); clear_buffer_uptodate(bh); } /** * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore * @ip: the inode who owns the buffers * @bstart: the first buffer in the run * @blen: the number of buffers in the run * */ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *bh; while (blen) { bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE); if (bh) { lock_buffer(bh); gfs2_log_lock(sdp); gfs2_remove_from_journal(bh, current->journal_info, 1); gfs2_log_unlock(sdp); unlock_buffer(bh); brelse(bh); } bstart++; blen--; } } /** * gfs2_meta_indirect_buffer - Get a metadata buffer * @ip: The GFS2 inode * @height: The level of this buf in the metadata (indir addr) tree (if any) * @num: The block number (device relative) of the buffer * @new: Non-zero if we may create a new buffer * @bhp: the buffer is returned here * * Returns: errno */ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, int new, struct buffer_head **bhp) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_glock *gl = ip->i_gl; struct buffer_head *bh; int ret = 0; if (new) { BUG_ON(height == 0); bh = gfs2_meta_new(gl, num); gfs2_trans_add_bh(ip->i_gl, bh, 1); gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header)); } else { u32 mtype = height ? 
GFS2_METATYPE_IN : GFS2_METATYPE_DI; ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh); if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) { brelse(bh); ret = -EIO; } } *bhp = bh; return ret; } /** * gfs2_meta_ra - start readahead on an extent of a file * @gl: the glock the blocks belong to * @dblock: the starting disk block * @extlen: the number of blocks in the extent * * returns: the first buffer in the extent */ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) { struct gfs2_sbd *sdp = gl->gl_sbd; struct buffer_head *first_bh, *bh; u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >> sdp->sd_sb.sb_bsize_shift; BUG_ON(!extlen); if (max_ra < 1) max_ra = 1; if (extlen > max_ra) extlen = max_ra; first_bh = gfs2_getbuf(gl, dblock, CREATE); if (buffer_uptodate(first_bh)) goto out; if (!buffer_locked(first_bh)) ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh); dblock++; extlen--; while (extlen) { bh = gfs2_getbuf(gl, dblock, CREATE); if (!buffer_uptodate(bh) && !buffer_locked(bh)) ll_rw_block(READA, 1, &bh); brelse(bh); dblock++; extlen--; if (!buffer_locked(first_bh) && buffer_uptodate(first_bh)) goto out; } wait_on_buffer(first_bh); out: return first_bh; }
gpl-2.0
TeamSXL/htc-cm-kernel-doubleshot-34_old
drivers/bluetooth/btmrvl_debugfs.c
1266
11521
/** * Marvell Bluetooth driver: debugfs related functions * * Copyright (C) 2009, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. **/ #include <linux/debugfs.h> #include <linux/slab.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btmrvl_drv.h" struct btmrvl_debugfs_data { struct dentry *config_dir; struct dentry *status_dir; /* config */ struct dentry *psmode; struct dentry *pscmd; struct dentry *hsmode; struct dentry *hscmd; struct dentry *gpiogap; struct dentry *hscfgcmd; /* status */ struct dentry *curpsmode; struct dentry *hsstate; struct dentry *psstate; struct dentry *txdnldready; }; static ssize_t btmrvl_hscfgcmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = strict_strtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.hscfgcmd = result; if (priv->btmrvl_dev.hscfgcmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_hscfgcmd_read(struct file *file, char __user *userbuf, size_t 
count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscfgcmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_hscfgcmd_fops = { .read = btmrvl_hscfgcmd_read, .write = btmrvl_hscfgcmd_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = strict_strtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.psmode = result; return count; } static ssize_t btmrvl_psmode_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.psmode); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_psmode_fops = { .read = btmrvl_psmode_read, .write = btmrvl_psmode_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = strict_strtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.pscmd = result; if (priv->btmrvl_dev.pscmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_pscmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char 
buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.pscmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_pscmd_fops = { .read = btmrvl_pscmd_read, .write = btmrvl_pscmd_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = strict_strtol(buf, 16, &result); if (ret) return ret; priv->btmrvl_dev.gpio_gap = result; return count; } static ssize_t btmrvl_gpiogap_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "0x%x\n", priv->btmrvl_dev.gpio_gap); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_gpiogap_fops = { .read = btmrvl_gpiogap_read, .write = btmrvl_gpiogap_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = strict_strtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.hscmd = result; if (priv->btmrvl_dev.hscmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_hscmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", 
priv->btmrvl_dev.hscmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_hscmd_fops = { .read = btmrvl_hscmd_read, .write = btmrvl_hscmd_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; long result, ret; memset(buf, 0, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; ret = strict_strtol(buf, 10, &result); if (ret) return ret; priv->btmrvl_dev.hsmode = result; return count; } static ssize_t btmrvl_hsmode_read(struct file *file, char __user * userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hsmode); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_hsmode_fops = { .read = btmrvl_hsmode_read, .write = btmrvl_hsmode_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->psmode); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_curpsmode_fops = { .read = btmrvl_curpsmode_read, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->ps_state); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_psstate_fops = { .read = 
btmrvl_psstate_read, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->adapter->hs_state); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_hsstate_fops = { .read = btmrvl_hsstate_read, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.tx_dnld_rdy); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_txdnldready_fops = { .read = btmrvl_txdnldready_read, .open = simple_open, .llseek = default_llseek, }; void btmrvl_debugfs_init(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_debugfs_data *dbg; if (!hdev->debugfs) return; dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); priv->debugfs_data = dbg; if (!dbg) { BT_ERR("Can not allocate memory for btmrvl_debugfs_data."); return; } dbg->config_dir = debugfs_create_dir("config", hdev->debugfs); dbg->psmode = debugfs_create_file("psmode", 0644, dbg->config_dir, priv, &btmrvl_psmode_fops); dbg->pscmd = debugfs_create_file("pscmd", 0644, dbg->config_dir, priv, &btmrvl_pscmd_fops); dbg->gpiogap = debugfs_create_file("gpiogap", 0644, dbg->config_dir, priv, &btmrvl_gpiogap_fops); dbg->hsmode = debugfs_create_file("hsmode", 0644, dbg->config_dir, priv, &btmrvl_hsmode_fops); dbg->hscmd = debugfs_create_file("hscmd", 0644, dbg->config_dir, priv, &btmrvl_hscmd_fops); dbg->hscfgcmd = debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, priv, &btmrvl_hscfgcmd_fops); dbg->status_dir = debugfs_create_dir("status", 
hdev->debugfs); dbg->curpsmode = debugfs_create_file("curpsmode", 0444, dbg->status_dir, priv, &btmrvl_curpsmode_fops); dbg->psstate = debugfs_create_file("psstate", 0444, dbg->status_dir, priv, &btmrvl_psstate_fops); dbg->hsstate = debugfs_create_file("hsstate", 0444, dbg->status_dir, priv, &btmrvl_hsstate_fops); dbg->txdnldready = debugfs_create_file("txdnldready", 0444, dbg->status_dir, priv, &btmrvl_txdnldready_fops); } void btmrvl_debugfs_remove(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_debugfs_data *dbg = priv->debugfs_data; if (!dbg) return; debugfs_remove(dbg->psmode); debugfs_remove(dbg->pscmd); debugfs_remove(dbg->gpiogap); debugfs_remove(dbg->hsmode); debugfs_remove(dbg->hscmd); debugfs_remove(dbg->hscfgcmd); debugfs_remove(dbg->config_dir); debugfs_remove(dbg->curpsmode); debugfs_remove(dbg->psstate); debugfs_remove(dbg->hsstate); debugfs_remove(dbg->txdnldready); debugfs_remove(dbg->status_dir); kfree(dbg); }
gpl-2.0
carbonsoft/kernel
arch/x86/xen/irq.c
1522
3391
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}

/*
 * Read the virtual IF flag for this vcpu.  The event-channel upcall
 * mask has the opposite sense of the x86 IF flag, so invert it and
 * expand the result into the X86_EFLAGS_IF bit expected by callers.
 */
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = percpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

/*
 * Restore a previously saved virtual IF state.  @flags is an
 * X86_EFLAGS_IF-style value as produced by xen_save_fl().
 */
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = percpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

static void xen_halt(void)
{
	/* With interrupts disabled we cannot be woken by an event, so
	   take the vcpu down entirely; otherwise do a normal blocking
	   halt that re-enables interrupts. */
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static const struct pv_irq_ops xen_irq_ops __initdata = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

/*
 * Install the Xen paravirt irq ops and route IRQ initialization
 * through Xen's event-channel setup.
 *
 * Fix: declare the parameter list as (void) — an empty () declarator
 * means "unspecified parameters" in C and is rejected by kernel style.
 */
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}
gpl-2.0
NieNs/IM-A750K
drivers/staging/msm/ebi2_l2f.c
3058
14489
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include "msm_fb.h" #include <linux/memory.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include "linux/proc_fs.h" #include <linux/delay.h> #include <mach/hardware.h> #include <linux/io.h> #include <asm/system.h> #include <asm/mach-types.h> /* The following are for MSM5100 on Gator */ #ifdef FEATURE_PM1000 #include "pm1000.h" #endif /* FEATURE_PM1000 */ /* The following are for MSM6050 on Bambi */ #ifdef FEATURE_PMIC_LCDKBD_LED_DRIVER #include "pm.h" #endif /* FEATURE_PMIC_LCDKBD_LED_DRIVER */ #ifdef DISP_DEVICE_18BPP #undef DISP_DEVICE_18BPP #define DISP_DEVICE_16BPP #endif #define QCIF_WIDTH 176 #define QCIF_HEIGHT 220 static void *DISP_CMD_PORT; static void *DISP_DATA_PORT; #define DISP_CMD_DISON 0xaf #define DISP_CMD_DISOFF 0xae #define DISP_CMD_DISNOR 0xa6 #define DISP_CMD_DISINV 0xa7 #define DISP_CMD_DISCTL 0xca #define DISP_CMD_GCP64 0xcb #define DISP_CMD_GCP16 0xcc #define DISP_CMD_GSSET 0xcd #define DISP_GS_2 0x02 #define DISP_GS_16 0x01 #define DISP_GS_64 0x00 #define DISP_CMD_SLPIN 0x95 #define DISP_CMD_SLPOUT 0x94 #define DISP_CMD_SD_PSET 0x75 #define DISP_CMD_MD_PSET 0x76 #define DISP_CMD_SD_CSET 0x15 #define DISP_CMD_MD_CSET 0x16 #define 
DISP_CMD_DATCTL 0xbc #define DISP_DATCTL_666 0x08 #define DISP_DATCTL_565 0x28 #define DISP_DATCTL_444 0x38 #define DISP_CMD_RAMWR 0x5c #define DISP_CMD_RAMRD 0x5d #define DISP_CMD_PTLIN 0xa8 #define DISP_CMD_PTLOUT 0xa9 #define DISP_CMD_ASCSET 0xaa #define DISP_CMD_SCSTART 0xab #define DISP_CMD_VOLCTL 0xc6 #define DISP_VOLCTL_TONE 0x80 #define DISP_CMD_NOp 0x25 #define DISP_CMD_OSSEL 0xd0 #define DISP_CMD_3500KSET 0xd1 #define DISP_CMD_3500KEND 0xd2 #define DISP_CMD_14MSET 0xd3 #define DISP_CMD_14MEND 0xd4 #define DISP_CMD_OUT(cmd) outpw(DISP_CMD_PORT, cmd); #define DISP_DATA_OUT(data) outpw(DISP_DATA_PORT, data); #define DISP_DATA_IN() inpw(DISP_DATA_PORT); /* Epson device column number starts at 2 */ #define DISP_SET_RECT(ulhc_row, lrhc_row, ulhc_col, lrhc_col) \ DISP_CMD_OUT(DISP_CMD_SD_PSET) \ DISP_DATA_OUT((ulhc_row) & 0xFF) \ DISP_DATA_OUT((ulhc_row) >> 8) \ DISP_DATA_OUT((lrhc_row) & 0xFF) \ DISP_DATA_OUT((lrhc_row) >> 8) \ DISP_CMD_OUT(DISP_CMD_SD_CSET) \ DISP_DATA_OUT(((ulhc_col)+2) & 0xFF) \ DISP_DATA_OUT(((ulhc_col)+2) >> 8) \ DISP_DATA_OUT(((lrhc_col)+2) & 0xFF) \ DISP_DATA_OUT(((lrhc_col)+2) >> 8) #define DISP_MIN_CONTRAST 0 #define DISP_MAX_CONTRAST 127 #define DISP_DEFAULT_CONTRAST 80 #define DISP_MIN_BACKLIGHT 0 #define DISP_MAX_BACKLIGHT 15 #define DISP_DEFAULT_BACKLIGHT 2 #define WAIT_SEC(sec) mdelay((sec)/1000) static word disp_area_start_row; static word disp_area_end_row; static byte disp_contrast = DISP_DEFAULT_CONTRAST; static boolean disp_powered_up; static boolean disp_initialized = FALSE; /* For some reason the contrast set at init time is not good. 
Need to do * it again */ static boolean display_on = FALSE; static void epsonQcif_disp_init(struct platform_device *pdev); static void epsonQcif_disp_set_contrast(word contrast); static void epsonQcif_disp_set_display_area(word start_row, word end_row); static int epsonQcif_disp_off(struct platform_device *pdev); static int epsonQcif_disp_on(struct platform_device *pdev); static void epsonQcif_disp_set_rect(int x, int y, int xres, int yres); volatile word databack; static void epsonQcif_disp_init(struct platform_device *pdev) { struct msm_fb_data_type *mfd; int i; if (disp_initialized) return; mfd = platform_get_drvdata(pdev); DISP_CMD_PORT = mfd->cmd_port; DISP_DATA_PORT = mfd->data_port; /* Sleep in */ DISP_CMD_OUT(DISP_CMD_SLPIN); /* Display off */ DISP_CMD_OUT(DISP_CMD_DISOFF); /* Display normal */ DISP_CMD_OUT(DISP_CMD_DISNOR); /* Set data mode */ DISP_CMD_OUT(DISP_CMD_DATCTL); DISP_DATA_OUT(DISP_DATCTL_565); /* Set display timing */ DISP_CMD_OUT(DISP_CMD_DISCTL); DISP_DATA_OUT(0x1c); /* p1 */ DISP_DATA_OUT(0x02); /* p1 */ DISP_DATA_OUT(0x82); /* p2 */ DISP_DATA_OUT(0x00); /* p3 */ DISP_DATA_OUT(0x00); /* p4 */ DISP_DATA_OUT(0xe0); /* p5 */ DISP_DATA_OUT(0x00); /* p5 */ DISP_DATA_OUT(0xdc); /* p6 */ DISP_DATA_OUT(0x00); /* p6 */ DISP_DATA_OUT(0x02); /* p7 */ DISP_DATA_OUT(0x00); /* p8 */ /* Set 64 gray scale level */ DISP_CMD_OUT(DISP_CMD_GCP64); DISP_DATA_OUT(0x08); /* p01 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0x2a); /* p02 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0x4e); /* p03 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0x6b); /* p04 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0x88); /* p05 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0xa3); /* p06 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0xba); /* p07 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0xd1); /* p08 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0xe5); /* p09 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0xf3); /* p10 */ DISP_DATA_OUT(0x00); DISP_DATA_OUT(0x03); /* p11 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x13); /* p12 */ DISP_DATA_OUT(0x01); 
DISP_DATA_OUT(0x22); /* p13 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x2f); /* p14 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x3b); /* p15 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x46); /* p16 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x51); /* p17 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x5b); /* p18 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x64); /* p19 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x6c); /* p20 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x74); /* p21 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x7c); /* p22 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x83); /* p23 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x8a); /* p24 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x91); /* p25 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x98); /* p26 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x9f); /* p27 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xa6); /* p28 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xac); /* p29 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xb2); /* p30 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xb7); /* p31 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xbc); /* p32 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xc1); /* p33 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xc6); /* p34 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xcb); /* p35 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xd0); /* p36 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xd4); /* p37 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xd8); /* p38 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xdc); /* p39 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xe0); /* p40 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xe4); /* p41 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xe8); /* p42 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xec); /* p43 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xf0); /* p44 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xf4); /* p45 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xf8); /* p46 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xfb); /* p47 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xfe); /* p48 */ DISP_DATA_OUT(0x01); DISP_DATA_OUT(0x01); /* p49 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x03); /* p50 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x05); /* 
p51 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x07); /* p52 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x09); /* p53 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x0b); /* p54 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x0d); /* p55 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x0f); /* p56 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x11); /* p57 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x13); /* p58 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x15); /* p59 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x17); /* p60 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x19); /* p61 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x1b); /* p62 */ DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x1c); /* p63 */ DISP_DATA_OUT(0x02); /* Set 16 gray scale level */ DISP_CMD_OUT(DISP_CMD_GCP16); DISP_DATA_OUT(0x1a); /* p01 */ DISP_DATA_OUT(0x32); /* p02 */ DISP_DATA_OUT(0x42); /* p03 */ DISP_DATA_OUT(0x4c); /* p04 */ DISP_DATA_OUT(0x58); /* p05 */ DISP_DATA_OUT(0x5f); /* p06 */ DISP_DATA_OUT(0x66); /* p07 */ DISP_DATA_OUT(0x6b); /* p08 */ DISP_DATA_OUT(0x70); /* p09 */ DISP_DATA_OUT(0x74); /* p10 */ DISP_DATA_OUT(0x78); /* p11 */ DISP_DATA_OUT(0x7b); /* p12 */ DISP_DATA_OUT(0x7e); /* p13 */ DISP_DATA_OUT(0x80); /* p14 */ DISP_DATA_OUT(0x82); /* p15 */ /* Set DSP column */ DISP_CMD_OUT(DISP_CMD_MD_CSET); DISP_DATA_OUT(0xff); DISP_DATA_OUT(0x03); DISP_DATA_OUT(0xff); DISP_DATA_OUT(0x03); /* Set DSP page */ DISP_CMD_OUT(DISP_CMD_MD_PSET); DISP_DATA_OUT(0xff); DISP_DATA_OUT(0x01); DISP_DATA_OUT(0xff); DISP_DATA_OUT(0x01); /* Set ARM column */ DISP_CMD_OUT(DISP_CMD_SD_CSET); DISP_DATA_OUT(0x02); DISP_DATA_OUT(0x00); DISP_DATA_OUT((QCIF_WIDTH + 1) & 0xFF); DISP_DATA_OUT((QCIF_WIDTH + 1) >> 8); /* Set ARM page */ DISP_CMD_OUT(DISP_CMD_SD_PSET); DISP_DATA_OUT(0x00); DISP_DATA_OUT(0x00); DISP_DATA_OUT((QCIF_HEIGHT - 1) & 0xFF); DISP_DATA_OUT((QCIF_HEIGHT - 1) >> 8); /* Set 64 gray scales */ DISP_CMD_OUT(DISP_CMD_GSSET); DISP_DATA_OUT(DISP_GS_64); DISP_CMD_OUT(DISP_CMD_OSSEL); DISP_DATA_OUT(0); /* Sleep out */ DISP_CMD_OUT(DISP_CMD_SLPOUT); WAIT_SEC(40000); /* 
Initialize power IC */ DISP_CMD_OUT(DISP_CMD_VOLCTL); DISP_DATA_OUT(DISP_VOLCTL_TONE); WAIT_SEC(40000); /* Set electronic volume, d'xx */ DISP_CMD_OUT(DISP_CMD_VOLCTL); DISP_DATA_OUT(DISP_DEFAULT_CONTRAST); /* value from 0 to 127 */ /* Initialize display data */ DISP_SET_RECT(0, (QCIF_HEIGHT - 1), 0, (QCIF_WIDTH - 1)); DISP_CMD_OUT(DISP_CMD_RAMWR); for (i = 0; i < QCIF_HEIGHT * QCIF_WIDTH; i++) DISP_DATA_OUT(0xffff); DISP_CMD_OUT(DISP_CMD_RAMRD); databack = DISP_DATA_IN(); databack = DISP_DATA_IN(); databack = DISP_DATA_IN(); databack = DISP_DATA_IN(); WAIT_SEC(80000); DISP_CMD_OUT(DISP_CMD_DISON); disp_area_start_row = 0; disp_area_end_row = QCIF_HEIGHT - 1; disp_powered_up = TRUE; disp_initialized = TRUE; epsonQcif_disp_set_display_area(0, QCIF_HEIGHT - 1); display_on = TRUE; } static void epsonQcif_disp_set_rect(int x, int y, int xres, int yres) { if (!disp_initialized) return; DISP_SET_RECT(y, y + yres - 1, x, x + xres - 1); DISP_CMD_OUT(DISP_CMD_RAMWR); } static void epsonQcif_disp_set_display_area(word start_row, word end_row) { if (!disp_initialized) return; if ((start_row == disp_area_start_row) && (end_row == disp_area_end_row)) return; disp_area_start_row = start_row; disp_area_end_row = end_row; /* Range checking */ if (end_row >= QCIF_HEIGHT) end_row = QCIF_HEIGHT - 1; if (start_row > end_row) start_row = end_row; /* When display is not the full screen, gray scale is set to ** 2; otherwise it is set to 64. 
*/ if ((start_row == 0) && (end_row == (QCIF_HEIGHT - 1))) { /* The whole screen */ DISP_CMD_OUT(DISP_CMD_PTLOUT); WAIT_SEC(10000); DISP_CMD_OUT(DISP_CMD_DISOFF); WAIT_SEC(100000); DISP_CMD_OUT(DISP_CMD_GSSET); DISP_DATA_OUT(DISP_GS_64); WAIT_SEC(100000); DISP_CMD_OUT(DISP_CMD_DISON); } else { /* partial screen */ DISP_CMD_OUT(DISP_CMD_PTLIN); DISP_DATA_OUT(start_row); DISP_DATA_OUT(start_row >> 8); DISP_DATA_OUT(end_row); DISP_DATA_OUT(end_row >> 8); DISP_CMD_OUT(DISP_CMD_GSSET); DISP_DATA_OUT(DISP_GS_2); } } static int epsonQcif_disp_off(struct platform_device *pdev) { if (!disp_initialized) epsonQcif_disp_init(pdev); if (display_on) { DISP_CMD_OUT(DISP_CMD_DISOFF); DISP_CMD_OUT(DISP_CMD_SLPIN); display_on = FALSE; } return 0; } static int epsonQcif_disp_on(struct platform_device *pdev) { if (!disp_initialized) epsonQcif_disp_init(pdev); if (!display_on) { DISP_CMD_OUT(DISP_CMD_SLPOUT); WAIT_SEC(40000); DISP_CMD_OUT(DISP_CMD_DISON); epsonQcif_disp_set_contrast(disp_contrast); display_on = TRUE; } return 0; } static void epsonQcif_disp_set_contrast(word contrast) { if (!disp_initialized) return; /* Initialize power IC, d'24 */ DISP_CMD_OUT(DISP_CMD_VOLCTL); DISP_DATA_OUT(DISP_VOLCTL_TONE); WAIT_SEC(40000); /* Set electronic volume, d'xx */ DISP_CMD_OUT(DISP_CMD_VOLCTL); if (contrast > 127) contrast = 127; DISP_DATA_OUT(contrast); /* value from 0 to 127 */ disp_contrast = (byte) contrast; } /* End disp_set_contrast */ static void epsonQcif_disp_clear_screen_area( word start_row, word end_row, word start_column, word end_column) { int32 i; /* Clear the display screen */ DISP_SET_RECT(start_row, end_row, start_column, end_column); DISP_CMD_OUT(DISP_CMD_RAMWR); i = (end_row - start_row + 1) * (end_column - start_column + 1); for (; i > 0; i--) DISP_DATA_OUT(0xffff); } static int __init epsonQcif_probe(struct platform_device *pdev) { msm_fb_add_device(pdev); return 0; } static struct platform_driver this_driver = { .probe = epsonQcif_probe, .driver = { .name = 
"ebi2_epson_qcif", }, }; static struct msm_fb_panel_data epsonQcif_panel_data = { .on = epsonQcif_disp_on, .off = epsonQcif_disp_off, .set_rect = epsonQcif_disp_set_rect, }; static struct platform_device this_device = { .name = "ebi2_epson_qcif", .id = 0, .dev = { .platform_data = &epsonQcif_panel_data, } }; static int __init epsonQcif_init(void) { int ret; struct msm_panel_info *pinfo; ret = platform_driver_register(&this_driver); if (!ret) { pinfo = &epsonQcif_panel_data.panel_info; pinfo->xres = QCIF_WIDTH; pinfo->yres = QCIF_HEIGHT; pinfo->type = EBI2_PANEL; pinfo->pdest = DISPLAY_2; pinfo->wait_cycle = 0x808000; pinfo->bpp = 16; pinfo->fb_num = 2; pinfo->lcd.vsync_enable = FALSE; ret = platform_device_register(&this_device); if (ret) platform_driver_unregister(&this_driver); } return ret; } module_init(epsonQcif_init);
gpl-2.0
nmenon/ti-linux-kernel-nm
drivers/macintosh/windfarm_fcu_controls.c
4594
14379
/* * Windfarm PowerMac thermal control. FCU fan control * * Copyright 2012 Benjamin Herrenschmidt, IBM Corp. * * Released under the term of the GNU GPL v2. */ #undef DEBUG #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/i2c.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> #include "windfarm.h" #include "windfarm_mpu.h" #define VERSION "1.0" #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) do { } while(0) #endif /* * This option is "weird" :) Basically, if you define this to 1 * the control loop for the RPMs fans (not PWMs) will apply the * correction factor obtained from the PID to the actual RPM * speed read from the FCU. * * If you define the below constant to 0, then it will be * applied to the setpoint RPM speed, that is basically the * speed we proviously "asked" for. * * I'm using 0 for now which is what therm_pm72 used to do and * what Darwin -apparently- does based on observed behaviour. 
*/ #define RPM_PID_USE_ACTUAL_SPEED 0 /* Default min/max for pumps */ #define CPU_PUMP_OUTPUT_MAX 3200 #define CPU_PUMP_OUTPUT_MIN 1250 #define FCU_FAN_RPM 0 #define FCU_FAN_PWM 1 struct wf_fcu_priv { struct kref ref; struct i2c_client *i2c; struct mutex lock; struct list_head fan_list; int rpm_shift; }; struct wf_fcu_fan { struct list_head link; int id; s32 min, max, target; struct wf_fcu_priv *fcu_priv; struct wf_control ctrl; }; static void wf_fcu_release(struct kref *ref) { struct wf_fcu_priv *pv = container_of(ref, struct wf_fcu_priv, ref); kfree(pv); } static void wf_fcu_fan_release(struct wf_control *ct) { struct wf_fcu_fan *fan = ct->priv; kref_put(&fan->fcu_priv->ref, wf_fcu_release); kfree(fan); } static int wf_fcu_read_reg(struct wf_fcu_priv *pv, int reg, unsigned char *buf, int nb) { int tries, nr, nw; mutex_lock(&pv->lock); buf[0] = reg; tries = 0; for (;;) { nw = i2c_master_send(pv->i2c, buf, 1); if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100) break; msleep(10); ++tries; } if (nw <= 0) { pr_err("Failure writing address to FCU: %d", nw); nr = nw; goto bail; } tries = 0; for (;;) { nr = i2c_master_recv(pv->i2c, buf, nb); if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100) break; msleep(10); ++tries; } if (nr <= 0) pr_err("wf_fcu: Failure reading data from FCU: %d", nw); bail: mutex_unlock(&pv->lock); return nr; } static int wf_fcu_write_reg(struct wf_fcu_priv *pv, int reg, const unsigned char *ptr, int nb) { int tries, nw; unsigned char buf[16]; buf[0] = reg; memcpy(buf+1, ptr, nb); ++nb; tries = 0; for (;;) { nw = i2c_master_send(pv->i2c, buf, nb); if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100) break; msleep(10); ++tries; } if (nw < 0) pr_err("wf_fcu: Failure writing to FCU: %d", nw); return nw; } static int wf_fcu_fan_set_rpm(struct wf_control *ct, s32 value) { struct wf_fcu_fan *fan = ct->priv; struct wf_fcu_priv *pv = fan->fcu_priv; int rc, shift = pv->rpm_shift; unsigned char buf[2]; if (value < fan->min) value = fan->min; if 
(value > fan->max) value = fan->max; fan->target = value; buf[0] = value >> (8 - shift); buf[1] = value << shift; rc = wf_fcu_write_reg(pv, 0x10 + (fan->id * 2), buf, 2); if (rc < 0) return -EIO; return 0; } static int wf_fcu_fan_get_rpm(struct wf_control *ct, s32 *value) { struct wf_fcu_fan *fan = ct->priv; struct wf_fcu_priv *pv = fan->fcu_priv; int rc, reg_base, shift = pv->rpm_shift; unsigned char failure; unsigned char active; unsigned char buf[2]; rc = wf_fcu_read_reg(pv, 0xb, &failure, 1); if (rc != 1) return -EIO; if ((failure & (1 << fan->id)) != 0) return -EFAULT; rc = wf_fcu_read_reg(pv, 0xd, &active, 1); if (rc != 1) return -EIO; if ((active & (1 << fan->id)) == 0) return -ENXIO; /* Programmed value or real current speed */ #if RPM_PID_USE_ACTUAL_SPEED reg_base = 0x11; #else reg_base = 0x10; #endif rc = wf_fcu_read_reg(pv, reg_base + (fan->id * 2), buf, 2); if (rc != 2) return -EIO; *value = (buf[0] << (8 - shift)) | buf[1] >> shift; return 0; } static int wf_fcu_fan_set_pwm(struct wf_control *ct, s32 value) { struct wf_fcu_fan *fan = ct->priv; struct wf_fcu_priv *pv = fan->fcu_priv; unsigned char buf[2]; int rc; if (value < fan->min) value = fan->min; if (value > fan->max) value = fan->max; fan->target = value; value = (value * 2559) / 1000; buf[0] = value; rc = wf_fcu_write_reg(pv, 0x30 + (fan->id * 2), buf, 1); if (rc < 0) return -EIO; return 0; } static int wf_fcu_fan_get_pwm(struct wf_control *ct, s32 *value) { struct wf_fcu_fan *fan = ct->priv; struct wf_fcu_priv *pv = fan->fcu_priv; unsigned char failure; unsigned char active; unsigned char buf[2]; int rc; rc = wf_fcu_read_reg(pv, 0x2b, &failure, 1); if (rc != 1) return -EIO; if ((failure & (1 << fan->id)) != 0) return -EFAULT; rc = wf_fcu_read_reg(pv, 0x2d, &active, 1); if (rc != 1) return -EIO; if ((active & (1 << fan->id)) == 0) return -ENXIO; rc = wf_fcu_read_reg(pv, 0x30 + (fan->id * 2), buf, 1); if (rc != 1) return -EIO; *value = (((s32)buf[0]) * 1000) / 2559; return 0; } static s32 
wf_fcu_fan_min(struct wf_control *ct) { struct wf_fcu_fan *fan = ct->priv; return fan->min; } static s32 wf_fcu_fan_max(struct wf_control *ct) { struct wf_fcu_fan *fan = ct->priv; return fan->max; } static const struct wf_control_ops wf_fcu_fan_rpm_ops = { .set_value = wf_fcu_fan_set_rpm, .get_value = wf_fcu_fan_get_rpm, .get_min = wf_fcu_fan_min, .get_max = wf_fcu_fan_max, .release = wf_fcu_fan_release, .owner = THIS_MODULE, }; static const struct wf_control_ops wf_fcu_fan_pwm_ops = { .set_value = wf_fcu_fan_set_pwm, .get_value = wf_fcu_fan_get_pwm, .get_min = wf_fcu_fan_min, .get_max = wf_fcu_fan_max, .release = wf_fcu_fan_release, .owner = THIS_MODULE, }; static void wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan) { const struct mpu_data *mpu = wf_get_mpu(0); u16 pump_min = 0, pump_max = 0xffff; u16 tmp[4]; /* Try to fetch pumps min/max infos from eeprom */ if (mpu) { memcpy(&tmp, mpu->processor_part_num, 8); if (tmp[0] != 0xffff && tmp[1] != 0xffff) { pump_min = max(pump_min, tmp[0]); pump_max = min(pump_max, tmp[1]); } if (tmp[2] != 0xffff && tmp[3] != 0xffff) { pump_min = max(pump_min, tmp[2]); pump_max = min(pump_max, tmp[3]); } } /* Double check the values, this _IS_ needed as the EEPROM on * some dual 2.5Ghz G5s seem, at least, to have both min & max * same to the same value ... 
(grrrr) */ if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) { pump_min = CPU_PUMP_OUTPUT_MIN; pump_max = CPU_PUMP_OUTPUT_MAX; } fan->min = pump_min; fan->max = pump_max; DBG("wf_fcu: pump min/max for %s set to: [%d..%d] RPM\n", fan->ctrl.name, pump_min, pump_max); } static void wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan) { struct wf_fcu_priv *pv = fan->fcu_priv; const struct mpu_data *mpu0 = wf_get_mpu(0); const struct mpu_data *mpu1 = wf_get_mpu(1); /* Default */ fan->min = 2400 >> pv->rpm_shift; fan->max = 56000 >> pv->rpm_shift; /* CPU fans have min/max in MPU */ if (mpu0 && !strcmp(fan->ctrl.name, "cpu-front-fan-0")) { fan->min = max(fan->min, (s32)mpu0->rminn_intake_fan); fan->max = min(fan->max, (s32)mpu0->rmaxn_intake_fan); goto bail; } if (mpu1 && !strcmp(fan->ctrl.name, "cpu-front-fan-1")) { fan->min = max(fan->min, (s32)mpu1->rminn_intake_fan); fan->max = min(fan->max, (s32)mpu1->rmaxn_intake_fan); goto bail; } if (mpu0 && !strcmp(fan->ctrl.name, "cpu-rear-fan-0")) { fan->min = max(fan->min, (s32)mpu0->rminn_exhaust_fan); fan->max = min(fan->max, (s32)mpu0->rmaxn_exhaust_fan); goto bail; } if (mpu1 && !strcmp(fan->ctrl.name, "cpu-rear-fan-1")) { fan->min = max(fan->min, (s32)mpu1->rminn_exhaust_fan); fan->max = min(fan->max, (s32)mpu1->rmaxn_exhaust_fan); goto bail; } /* Rackmac variants, we just use mpu0 intake */ if (!strncmp(fan->ctrl.name, "cpu-fan", 7)) { fan->min = max(fan->min, (s32)mpu0->rminn_intake_fan); fan->max = min(fan->max, (s32)mpu0->rmaxn_intake_fan); goto bail; } bail: DBG("wf_fcu: fan min/max for %s set to: [%d..%d] RPM\n", fan->ctrl.name, fan->min, fan->max); } static void wf_fcu_add_fan(struct wf_fcu_priv *pv, const char *name, int type, int id) { struct wf_fcu_fan *fan; fan = kzalloc(sizeof(*fan), GFP_KERNEL); if (!fan) return; fan->fcu_priv = pv; fan->id = id; fan->ctrl.name = name; fan->ctrl.priv = fan; /* min/max is oddball but the code comes from * therm_pm72 which seems to work so ... 
*/ if (type == FCU_FAN_RPM) { if (!strncmp(name, "cpu-pump", strlen("cpu-pump"))) wf_fcu_get_pump_minmax(fan); else wf_fcu_get_rpmfan_minmax(fan); fan->ctrl.type = WF_CONTROL_RPM_FAN; fan->ctrl.ops = &wf_fcu_fan_rpm_ops; } else { fan->min = 10; fan->max = 100; fan->ctrl.type = WF_CONTROL_PWM_FAN; fan->ctrl.ops = &wf_fcu_fan_pwm_ops; } if (wf_register_control(&fan->ctrl)) { pr_err("wf_fcu: Failed to register fan %s\n", name); kfree(fan); return; } list_add(&fan->link, &pv->fan_list); kref_get(&pv->ref); } static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv) { /* Translation of device-tree location properties to * windfarm fan names */ static const struct { const char *dt_name; /* Device-tree name */ const char *ct_name; /* Control name */ } loc_trans[] = { { "BACKSIDE", "backside-fan", }, { "SYS CTRLR FAN", "backside-fan", }, { "DRIVE BAY", "drive-bay-fan", }, { "SLOT", "slots-fan", }, { "PCI FAN", "slots-fan", }, { "CPU A INTAKE", "cpu-front-fan-0", }, { "CPU A EXHAUST", "cpu-rear-fan-0", }, { "CPU B INTAKE", "cpu-front-fan-1", }, { "CPU B EXHAUST", "cpu-rear-fan-1", }, { "CPU A PUMP", "cpu-pump-0", }, { "CPU B PUMP", "cpu-pump-1", }, { "CPU A 1", "cpu-fan-a-0", }, { "CPU A 2", "cpu-fan-b-0", }, { "CPU A 3", "cpu-fan-c-0", }, { "CPU B 1", "cpu-fan-a-1", }, { "CPU B 2", "cpu-fan-b-1", }, { "CPU B 3", "cpu-fan-c-1", }, }; struct device_node *np = NULL, *fcu = pv->i2c->dev.of_node; int i; DBG("Looking up FCU controls in device-tree...\n"); while ((np = of_get_next_child(fcu, np)) != NULL) { int id, type = -1; const char *loc; const char *name; const u32 *reg; DBG(" control: %s, type: %s\n", np->name, np->type); /* Detect control type */ if (!strcmp(np->type, "fan-rpm-control") || !strcmp(np->type, "fan-rpm")) type = FCU_FAN_RPM; if (!strcmp(np->type, "fan-pwm-control") || !strcmp(np->type, "fan-pwm")) type = FCU_FAN_PWM; /* Only care about fans for now */ if (type == -1) continue; /* Lookup for a matching location */ loc = of_get_property(np, "location", NULL); reg 
= of_get_property(np, "reg", NULL); if (loc == NULL || reg == NULL) continue; DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg); for (i = 0; i < ARRAY_SIZE(loc_trans); i++) { if (strncmp(loc, loc_trans[i].dt_name, strlen(loc_trans[i].dt_name))) continue; name = loc_trans[i].ct_name; DBG(" location match, name: %s\n", name); if (type == FCU_FAN_RPM) id = ((*reg) - 0x10) / 2; else id = ((*reg) - 0x30) / 2; if (id > 7) { pr_warning("wf_fcu: Can't parse " "fan ID in device-tree for %s\n", np->full_name); break; } wf_fcu_add_fan(pv, name, type, id); break; } } } static void wf_fcu_default_fans(struct wf_fcu_priv *pv) { /* We only support the default fans for PowerMac7,2 */ if (!of_machine_is_compatible("PowerMac7,2")) return; wf_fcu_add_fan(pv, "backside-fan", FCU_FAN_PWM, 1); wf_fcu_add_fan(pv, "drive-bay-fan", FCU_FAN_RPM, 2); wf_fcu_add_fan(pv, "slots-fan", FCU_FAN_PWM, 2); wf_fcu_add_fan(pv, "cpu-front-fan-0", FCU_FAN_RPM, 3); wf_fcu_add_fan(pv, "cpu-rear-fan-0", FCU_FAN_RPM, 4); wf_fcu_add_fan(pv, "cpu-front-fan-1", FCU_FAN_RPM, 5); wf_fcu_add_fan(pv, "cpu-rear-fan-1", FCU_FAN_RPM, 6); } static int wf_fcu_init_chip(struct wf_fcu_priv *pv) { unsigned char buf = 0xff; int rc; rc = wf_fcu_write_reg(pv, 0xe, &buf, 1); if (rc < 0) return -EIO; rc = wf_fcu_write_reg(pv, 0x2e, &buf, 1); if (rc < 0) return -EIO; rc = wf_fcu_read_reg(pv, 0, &buf, 1); if (rc < 0) return -EIO; pv->rpm_shift = (buf == 1) ? 
2 : 3; pr_debug("wf_fcu: FCU Initialized, RPM fan shift is %d\n", pv->rpm_shift); return 0; } static int wf_fcu_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wf_fcu_priv *pv; pv = kzalloc(sizeof(*pv), GFP_KERNEL); if (!pv) return -ENOMEM; kref_init(&pv->ref); mutex_init(&pv->lock); INIT_LIST_HEAD(&pv->fan_list); pv->i2c = client; /* * First we must start the FCU which will query the * shift value to apply to RPMs */ if (wf_fcu_init_chip(pv)) { pr_err("wf_fcu: Initialization failed !\n"); kfree(pv); return -ENXIO; } /* First lookup fans in the device-tree */ wf_fcu_lookup_fans(pv); /* * Older machines don't have the device-tree entries * we are looking for, just hard code the list */ if (list_empty(&pv->fan_list)) wf_fcu_default_fans(pv); /* Still no fans ? FAIL */ if (list_empty(&pv->fan_list)) { pr_err("wf_fcu: Failed to find fans for your machine\n"); kfree(pv); return -ENODEV; } dev_set_drvdata(&client->dev, pv); return 0; } static int wf_fcu_remove(struct i2c_client *client) { struct wf_fcu_priv *pv = dev_get_drvdata(&client->dev); struct wf_fcu_fan *fan; while (!list_empty(&pv->fan_list)) { fan = list_first_entry(&pv->fan_list, struct wf_fcu_fan, link); list_del(&fan->link); wf_unregister_control(&fan->ctrl); } kref_put(&pv->ref, wf_fcu_release); return 0; } static const struct i2c_device_id wf_fcu_id[] = { { "MAC,fcu", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wf_fcu_id); static struct i2c_driver wf_fcu_driver = { .driver = { .name = "wf_fcu", }, .probe = wf_fcu_probe, .remove = wf_fcu_remove, .id_table = wf_fcu_id, }; module_i2c_driver(wf_fcu_driver); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("FCU control objects for PowerMacs thermal control"); MODULE_LICENSE("GPL");
gpl-2.0
ND-3500/golden_cm10.2_kernel
sound/drivers/vx/vx_core.c
4594
20708
/* * Driver for Digigram VX soundcards * * Hardware core part * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/device.h> #include <linux/firmware.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/asoundef.h> #include <sound/info.h> #include <asm/io.h> #include <sound/vx_core.h> #include "vx_cmd.h" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Common routines for Digigram VX drivers"); MODULE_LICENSE("GPL"); /* * vx_check_reg_bit - wait for the specified bit is set/reset on a register * @reg: register to check * @mask: bit mask * @bit: resultant bit to be checked * @time: time-out of loop in msec * * returns zero if a bit matches, or a negative error code. 
*/ int snd_vx_check_reg_bit(struct vx_core *chip, int reg, int mask, int bit, int time) { unsigned long end_time = jiffies + (time * HZ + 999) / 1000; #ifdef CONFIG_SND_DEBUG static char *reg_names[VX_REG_MAX] = { "ICR", "CVR", "ISR", "IVR", "RXH", "RXM", "RXL", "DMA", "CDSP", "RFREQ", "RUER/V2", "DATA", "MEMIRQ", "ACQ", "BIT0", "BIT1", "MIC0", "MIC1", "MIC2", "MIC3", "INTCSR", "CNTRL", "GPIOC", "LOFREQ", "HIFREQ", "CSUER", "RUER" }; #endif do { if ((snd_vx_inb(chip, reg) & mask) == bit) return 0; //msleep(10); } while (time_after_eq(end_time, jiffies)); snd_printd(KERN_DEBUG "vx_check_reg_bit: timeout, reg=%s, mask=0x%x, val=0x%x\n", reg_names[reg], mask, snd_vx_inb(chip, reg)); return -EIO; } EXPORT_SYMBOL(snd_vx_check_reg_bit); /* * vx_send_irq_dsp - set command irq bit * @num: the requested IRQ type, IRQ_XXX * * this triggers the specified IRQ request * returns 0 if successful, or a negative error code. * */ static int vx_send_irq_dsp(struct vx_core *chip, int num) { int nirq; /* wait for Hc = 0 */ if (snd_vx_check_reg_bit(chip, VX_CVR, CVR_HC, 0, 200) < 0) return -EIO; nirq = num; if (vx_has_new_dsp(chip)) nirq += VXP_IRQ_OFFSET; vx_outb(chip, CVR, (nirq >> 1) | CVR_HC); return 0; } /* * vx_reset_chk - reset CHK bit on ISR * * returns 0 if successful, or a negative error code. */ static int vx_reset_chk(struct vx_core *chip) { /* Reset irq CHK */ if (vx_send_irq_dsp(chip, IRQ_RESET_CHK) < 0) return -EIO; /* Wait until CHK = 0 */ if (vx_check_isr(chip, ISR_CHK, 0, 200) < 0) return -EIO; return 0; } /* * vx_transfer_end - terminate message transfer * @cmd: IRQ message to send (IRQ_MESS_XXX_END) * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * NB: call with spinlock held! 
*/ static int vx_transfer_end(struct vx_core *chip, int cmd) { int err; if ((err = vx_reset_chk(chip)) < 0) return err; /* irq MESS_READ/WRITE_END */ if ((err = vx_send_irq_dsp(chip, cmd)) < 0) return err; /* Wait CHK = 1 */ if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; /* If error, Read RX */ if ((err = vx_inb(chip, ISR)) & ISR_ERR) { if ((err = vx_wait_for_rx_full(chip)) < 0) { snd_printd(KERN_DEBUG "transfer_end: error in rx_full\n"); return err; } err = vx_inb(chip, RXH) << 16; err |= vx_inb(chip, RXM) << 8; err |= vx_inb(chip, RXL); snd_printd(KERN_DEBUG "transfer_end: error = 0x%x\n", err); return -(VX_ERR_MASK | err); } return 0; } /* * vx_read_status - return the status rmh * @rmh: rmh record to store the status * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * NB: call with spinlock held! */ static int vx_read_status(struct vx_core *chip, struct vx_rmh *rmh) { int i, err, val, size; /* no read necessary? 
*/ if (rmh->DspStat == RMH_SSIZE_FIXED && rmh->LgStat == 0) return 0; /* Wait for RX full (with timeout protection) * The first word of status is in RX */ err = vx_wait_for_rx_full(chip); if (err < 0) return err; /* Read RX */ val = vx_inb(chip, RXH) << 16; val |= vx_inb(chip, RXM) << 8; val |= vx_inb(chip, RXL); /* If status given by DSP, let's decode its size */ switch (rmh->DspStat) { case RMH_SSIZE_ARG: size = val & 0xff; rmh->Stat[0] = val & 0xffff00; rmh->LgStat = size + 1; break; case RMH_SSIZE_MASK: /* Let's count the arg numbers from a mask */ rmh->Stat[0] = val; size = 0; while (val) { if (val & 0x01) size++; val >>= 1; } rmh->LgStat = size + 1; break; default: /* else retrieve the status length given by the driver */ size = rmh->LgStat; rmh->Stat[0] = val; /* Val is the status 1st word */ size--; /* hence adjust remaining length */ break; } if (size < 1) return 0; if (snd_BUG_ON(size > SIZE_MAX_STATUS)) return -EINVAL; for (i = 1; i <= size; i++) { /* trigger an irq MESS_WRITE_NEXT */ err = vx_send_irq_dsp(chip, IRQ_MESS_WRITE_NEXT); if (err < 0) return err; /* Wait for RX full (with timeout protection) */ err = vx_wait_for_rx_full(chip); if (err < 0) return err; rmh->Stat[i] = vx_inb(chip, RXH) << 16; rmh->Stat[i] |= vx_inb(chip, RXM) << 8; rmh->Stat[i] |= vx_inb(chip, RXL); } return vx_transfer_end(chip, IRQ_MESS_WRITE_END); } #define MASK_MORE_THAN_1_WORD_COMMAND 0x00008000 #define MASK_1_WORD_COMMAND 0x00ff7fff /* * vx_send_msg_nolock - send a DSP message and read back the status * @rmh: the rmh record to send and receive * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * * this function doesn't call spinlock at all. 
*/ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh) { int i, err; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; if ((err = vx_reset_chk(chip)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: vx_reset_chk error\n"); return err; } #if 0 printk(KERN_DEBUG "rmh: cmd = 0x%06x, length = %d, stype = %d\n", rmh->Cmd[0], rmh->LgCmd, rmh->DspStat); if (rmh->LgCmd > 1) { printk(KERN_DEBUG " "); for (i = 1; i < rmh->LgCmd; i++) printk("0x%06x ", rmh->Cmd[i]); printk("\n"); } #endif /* Check bit M is set according to length of the command */ if (rmh->LgCmd > 1) rmh->Cmd[0] |= MASK_MORE_THAN_1_WORD_COMMAND; else rmh->Cmd[0] &= MASK_1_WORD_COMMAND; /* Wait for TX empty */ if ((err = vx_wait_isr_bit(chip, ISR_TX_EMPTY)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: wait tx empty error\n"); return err; } /* Write Cmd[0] */ vx_outb(chip, TXH, (rmh->Cmd[0] >> 16) & 0xff); vx_outb(chip, TXM, (rmh->Cmd[0] >> 8) & 0xff); vx_outb(chip, TXL, rmh->Cmd[0] & 0xff); /* Trigger irq MESSAGE */ if ((err = vx_send_irq_dsp(chip, IRQ_MESSAGE)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: send IRQ_MESSAGE error\n"); return err; } /* Wait for CHK = 1 */ if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; /* If error, get error value from RX */ if (vx_inb(chip, ISR) & ISR_ERR) { if ((err = vx_wait_for_rx_full(chip)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: rx_full read error\n"); return err; } err = vx_inb(chip, RXH) << 16; err |= vx_inb(chip, RXM) << 8; err |= vx_inb(chip, RXL); snd_printd(KERN_DEBUG "msg got error = 0x%x at cmd[0]\n", err); err = -(VX_ERR_MASK | err); return err; } /* Send the other words */ if (rmh->LgCmd > 1) { for (i = 1; i < rmh->LgCmd; i++) { /* Wait for TX ready */ if ((err = vx_wait_isr_bit(chip, ISR_TX_READY)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: tx_ready error\n"); return err; } /* Write Cmd[i] */ vx_outb(chip, TXH, (rmh->Cmd[i] >> 16) & 0xff); vx_outb(chip, TXM, (rmh->Cmd[i] >> 8) & 0xff); vx_outb(chip, TXL, rmh->Cmd[i] & 0xff); /* Trigger 
irq MESS_READ_NEXT */ if ((err = vx_send_irq_dsp(chip, IRQ_MESS_READ_NEXT)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: IRQ_READ_NEXT error\n"); return err; } } /* Wait for TX empty */ if ((err = vx_wait_isr_bit(chip, ISR_TX_READY)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: TX_READY error\n"); return err; } /* End of transfer */ err = vx_transfer_end(chip, IRQ_MESS_READ_END); if (err < 0) return err; } return vx_read_status(chip, rmh); } /* * vx_send_msg - send a DSP message with spinlock * @rmh: the rmh record to send and receive * * returns 0 if successful, or a negative error code. * see vx_send_msg_nolock(). */ int vx_send_msg(struct vx_core *chip, struct vx_rmh *rmh) { unsigned long flags; int err; spin_lock_irqsave(&chip->lock, flags); err = vx_send_msg_nolock(chip, rmh); spin_unlock_irqrestore(&chip->lock, flags); return err; } /* * vx_send_rih_nolock - send an RIH to xilinx * @cmd: the command to send * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * * this function doesn't call spinlock at all. * * unlike RMH, no command is sent to DSP. */ int vx_send_rih_nolock(struct vx_core *chip, int cmd) { int err; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; #if 0 printk(KERN_DEBUG "send_rih: cmd = 0x%x\n", cmd); #endif if ((err = vx_reset_chk(chip)) < 0) return err; /* send the IRQ */ if ((err = vx_send_irq_dsp(chip, cmd)) < 0) return err; /* Wait CHK = 1 */ if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; /* If error, read RX */ if (vx_inb(chip, ISR) & ISR_ERR) { if ((err = vx_wait_for_rx_full(chip)) < 0) return err; err = vx_inb(chip, RXH) << 16; err |= vx_inb(chip, RXM) << 8; err |= vx_inb(chip, RXL); return -(VX_ERR_MASK | err); } return 0; } /* * vx_send_rih - send an RIH with spinlock * @cmd: the command to send * * see vx_send_rih_nolock(). 
*/ int vx_send_rih(struct vx_core *chip, int cmd) { unsigned long flags; int err; spin_lock_irqsave(&chip->lock, flags); err = vx_send_rih_nolock(chip, cmd); spin_unlock_irqrestore(&chip->lock, flags); return err; } #define END_OF_RESET_WAIT_TIME 500 /* us */ /** * snd_vx_boot_xilinx - boot up the xilinx interface * @boot: the boot record to load */ int snd_vx_load_boot_image(struct vx_core *chip, const struct firmware *boot) { unsigned int i; int no_fillup = vx_has_new_dsp(chip); /* check the length of boot image */ if (boot->size <= 0) return -EINVAL; if (boot->size % 3) return -EINVAL; #if 0 { /* more strict check */ unsigned int c = ((u32)boot->data[0] << 16) | ((u32)boot->data[1] << 8) | boot->data[2]; if (boot->size != (c + 2) * 3) return -EINVAL; } #endif /* reset dsp */ vx_reset_dsp(chip); udelay(END_OF_RESET_WAIT_TIME); /* another wait? */ /* download boot strap */ for (i = 0; i < 0x600; i += 3) { if (i >= boot->size) { if (no_fillup) break; if (vx_wait_isr_bit(chip, ISR_TX_EMPTY) < 0) { snd_printk(KERN_ERR "dsp boot failed at %d\n", i); return -EIO; } vx_outb(chip, TXH, 0); vx_outb(chip, TXM, 0); vx_outb(chip, TXL, 0); } else { const unsigned char *image = boot->data + i; if (vx_wait_isr_bit(chip, ISR_TX_EMPTY) < 0) { snd_printk(KERN_ERR "dsp boot failed at %d\n", i); return -EIO; } vx_outb(chip, TXH, image[0]); vx_outb(chip, TXM, image[1]); vx_outb(chip, TXL, image[2]); } } return 0; } EXPORT_SYMBOL(snd_vx_load_boot_image); /* * vx_test_irq_src - query the source of interrupts * * called from irq handler only */ static int vx_test_irq_src(struct vx_core *chip, unsigned int *ret) { int err; vx_init_rmh(&chip->irq_rmh, CMD_TEST_IT); spin_lock(&chip->lock); err = vx_send_msg_nolock(chip, &chip->irq_rmh); if (err < 0) *ret = 0; else *ret = chip->irq_rmh.Stat[0]; spin_unlock(&chip->lock); return err; } /* * vx_interrupt - soft irq handler */ static void vx_interrupt(unsigned long private_data) { struct vx_core *chip = (struct vx_core *) private_data; unsigned 
int events; if (chip->chip_status & VX_STAT_IS_STALE) return; if (vx_test_irq_src(chip, &events) < 0) return; #if 0 if (events & 0x000800) printk(KERN_ERR "DSP Stream underrun ! IRQ events = 0x%x\n", events); #endif // printk(KERN_DEBUG "IRQ events = 0x%x\n", events); /* We must prevent any application using this DSP * and block any further request until the application * either unregisters or reloads the DSP */ if (events & FATAL_DSP_ERROR) { snd_printk(KERN_ERR "vx_core: fatal DSP error!!\n"); return; } /* The start on time code conditions are filled (ie the time code * received by the board is equal to one of those given to it). */ if (events & TIME_CODE_EVENT_PENDING) ; /* so far, nothing to do yet */ /* The frequency has changed on the board (UER mode). */ if (events & FREQUENCY_CHANGE_EVENT_PENDING) vx_change_frequency(chip); /* update the pcm streams */ vx_pcm_update_intr(chip, events); } /** * snd_vx_irq_handler - interrupt handler */ irqreturn_t snd_vx_irq_handler(int irq, void *dev) { struct vx_core *chip = dev; if (! (chip->chip_status & VX_STAT_CHIP_INIT) || (chip->chip_status & VX_STAT_IS_STALE)) return IRQ_NONE; if (! vx_test_and_ack(chip)) tasklet_schedule(&chip->tq); return IRQ_HANDLED; } EXPORT_SYMBOL(snd_vx_irq_handler); /* */ static void vx_reset_board(struct vx_core *chip, int cold_reset) { if (snd_BUG_ON(!chip->ops->reset_board)) return; /* current source, later sync'ed with target */ chip->audio_source = VX_AUDIO_SRC_LINE; if (cold_reset) { chip->audio_source_target = chip->audio_source; chip->clock_source = INTERNAL_QUARTZ; chip->clock_mode = VX_CLOCK_MODE_AUTO; chip->freq = 48000; chip->uer_detected = VX_UER_MODE_NOT_PRESENT; chip->uer_bits = SNDRV_PCM_DEFAULT_CON_SPDIF; } chip->ops->reset_board(chip, cold_reset); vx_reset_codec(chip, cold_reset); vx_set_internal_clock(chip, chip->freq); /* Reset the DSP */ vx_reset_dsp(chip); if (vx_is_pcmcia(chip)) { /* Acknowledge any pending IRQ and reset the MEMIRQ flag. 
*/ vx_test_and_ack(chip); vx_validate_irq(chip, 1); } /* init CBits */ vx_set_iec958_status(chip, chip->uer_bits); } /* * proc interface */ static void vx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct vx_core *chip = entry->private_data; static char *audio_src_vxp[] = { "Line", "Mic", "Digital" }; static char *audio_src_vx2[] = { "Analog", "Analog", "Digital" }; static char *clock_mode[] = { "Auto", "Internal", "External" }; static char *clock_src[] = { "Internal", "External" }; static char *uer_type[] = { "Consumer", "Professional", "Not Present" }; snd_iprintf(buffer, "%s\n", chip->card->longname); snd_iprintf(buffer, "Xilinx Firmware: %s\n", chip->chip_status & VX_STAT_XILINX_LOADED ? "Loaded" : "No"); snd_iprintf(buffer, "Device Initialized: %s\n", chip->chip_status & VX_STAT_DEVICE_INIT ? "Yes" : "No"); snd_iprintf(buffer, "DSP audio info:"); if (chip->audio_info & VX_AUDIO_INFO_REAL_TIME) snd_iprintf(buffer, " realtime"); if (chip->audio_info & VX_AUDIO_INFO_OFFLINE) snd_iprintf(buffer, " offline"); if (chip->audio_info & VX_AUDIO_INFO_MPEG1) snd_iprintf(buffer, " mpeg1"); if (chip->audio_info & VX_AUDIO_INFO_MPEG2) snd_iprintf(buffer, " mpeg2"); if (chip->audio_info & VX_AUDIO_INFO_LINEAR_8) snd_iprintf(buffer, " linear8"); if (chip->audio_info & VX_AUDIO_INFO_LINEAR_16) snd_iprintf(buffer, " linear16"); if (chip->audio_info & VX_AUDIO_INFO_LINEAR_24) snd_iprintf(buffer, " linear24"); snd_iprintf(buffer, "\n"); snd_iprintf(buffer, "Input Source: %s\n", vx_is_pcmcia(chip) ? 
audio_src_vxp[chip->audio_source] : audio_src_vx2[chip->audio_source]); snd_iprintf(buffer, "Clock Mode: %s\n", clock_mode[chip->clock_mode]); snd_iprintf(buffer, "Clock Source: %s\n", clock_src[chip->clock_source]); snd_iprintf(buffer, "Frequency: %d\n", chip->freq); snd_iprintf(buffer, "Detected Frequency: %d\n", chip->freq_detected); snd_iprintf(buffer, "Detected UER type: %s\n", uer_type[chip->uer_detected]); snd_iprintf(buffer, "Min/Max/Cur IBL: %d/%d/%d (granularity=%d)\n", chip->ibl.min_size, chip->ibl.max_size, chip->ibl.size, chip->ibl.granularity); } static void vx_proc_init(struct vx_core *chip) { struct snd_info_entry *entry; if (! snd_card_proc_new(chip->card, "vx-status", &entry)) snd_info_set_text_ops(entry, chip, vx_proc_read); } /** * snd_vx_dsp_boot - load the DSP boot */ int snd_vx_dsp_boot(struct vx_core *chip, const struct firmware *boot) { int err; int cold_reset = !(chip->chip_status & VX_STAT_DEVICE_INIT); vx_reset_board(chip, cold_reset); vx_validate_irq(chip, 0); if ((err = snd_vx_load_boot_image(chip, boot)) < 0) return err; msleep(10); return 0; } EXPORT_SYMBOL(snd_vx_dsp_boot); /** * snd_vx_dsp_load - load the DSP image */ int snd_vx_dsp_load(struct vx_core *chip, const struct firmware *dsp) { unsigned int i; int err; unsigned int csum = 0; const unsigned char *image, *cptr; if (dsp->size % 3) return -EINVAL; vx_toggle_dac_mute(chip, 1); /* Transfert data buffer from PC to DSP */ for (i = 0; i < dsp->size; i += 3) { image = dsp->data + i; /* Wait DSP ready for a new read */ if ((err = vx_wait_isr_bit(chip, ISR_TX_EMPTY)) < 0) { printk(KERN_ERR "dsp loading error at position %d\n", i); return err; } cptr = image; csum ^= *cptr; csum = (csum >> 24) | (csum << 8); vx_outb(chip, TXH, *cptr++); csum ^= *cptr; csum = (csum >> 24) | (csum << 8); vx_outb(chip, TXM, *cptr++); csum ^= *cptr; csum = (csum >> 24) | (csum << 8); vx_outb(chip, TXL, *cptr++); } snd_printdd(KERN_DEBUG "checksum = 0x%08x\n", csum); msleep(200); if ((err = 
vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; vx_toggle_dac_mute(chip, 0); vx_test_and_ack(chip); vx_validate_irq(chip, 1); return 0; } EXPORT_SYMBOL(snd_vx_dsp_load); #ifdef CONFIG_PM /* * suspend */ int snd_vx_suspend(struct vx_core *chip, pm_message_t state) { unsigned int i; snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); chip->chip_status |= VX_STAT_IN_SUSPEND; for (i = 0; i < chip->hw->num_codecs; i++) snd_pcm_suspend_all(chip->pcm[i]); return 0; } EXPORT_SYMBOL(snd_vx_suspend); /* * resume */ int snd_vx_resume(struct vx_core *chip) { int i, err; chip->chip_status &= ~VX_STAT_CHIP_INIT; for (i = 0; i < 4; i++) { if (! chip->firmware[i]) continue; err = chip->ops->load_dsp(chip, i, chip->firmware[i]); if (err < 0) { snd_printk(KERN_ERR "vx: firmware resume error at DSP %d\n", i); return -EIO; } } chip->chip_status |= VX_STAT_CHIP_INIT; chip->chip_status &= ~VX_STAT_IN_SUSPEND; snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); return 0; } EXPORT_SYMBOL(snd_vx_resume); #endif /** * snd_vx_create - constructor for struct vx_core * @hw: hardware specific record * * this function allocates the instance and prepare for the hardware * initialization. * * return the instance pointer if successful, NULL in error. */ struct vx_core *snd_vx_create(struct snd_card *card, struct snd_vx_hardware *hw, struct snd_vx_ops *ops, int extra_size) { struct vx_core *chip; if (snd_BUG_ON(!card || !hw || !ops)) return NULL; chip = kzalloc(sizeof(*chip) + extra_size, GFP_KERNEL); if (! 
chip) { snd_printk(KERN_ERR "vx_core: no memory\n"); return NULL; } spin_lock_init(&chip->lock); spin_lock_init(&chip->irq_lock); chip->irq = -1; chip->hw = hw; chip->type = hw->type; chip->ops = ops; tasklet_init(&chip->tq, vx_interrupt, (unsigned long)chip); mutex_init(&chip->mixer_mutex); chip->card = card; card->private_data = chip; strcpy(card->driver, hw->name); sprintf(card->shortname, "Digigram %s", hw->name); vx_proc_init(chip); return chip; } EXPORT_SYMBOL(snd_vx_create); /* * module entries */ static int __init alsa_vx_core_init(void) { return 0; } static void __exit alsa_vx_core_exit(void) { } module_init(alsa_vx_core_init) module_exit(alsa_vx_core_exit)
gpl-2.0
Insswer/kernel_imx
arch/tile/lib/memcpy_tile64.c
7410
8973
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/string.h> #include <linux/smp.h> #include <linux/module.h> #include <linux/uaccess.h> #include <asm/fixmap.h> #include <asm/kmap_types.h> #include <asm/tlbflush.h> #include <hv/hypervisor.h> #include <arch/chip.h> #if !CHIP_HAS_COHERENT_LOCAL_CACHE() /* Defined in memcpy.S */ extern unsigned long __memcpy_asm(void *to, const void *from, unsigned long n); extern unsigned long __copy_to_user_inatomic_asm( void __user *to, const void *from, unsigned long n); extern unsigned long __copy_from_user_inatomic_asm( void *to, const void __user *from, unsigned long n); extern unsigned long __copy_from_user_zeroing_asm( void *to, const void __user *from, unsigned long n); typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long); /* Size above which to consider TLB games for performance */ #define LARGE_COPY_CUTOFF 2048 /* Communicate to the simulator what we are trying to do. */ #define sim_allow_multiple_caching(b) \ __insn_mtspr(SPR_SIM_CONTROL, \ SIM_CONTROL_ALLOW_MULTIPLE_CACHING | ((b) << _SIM_CONTROL_OPERATOR_BITS)) /* * Copy memory by briefly enabling incoherent cacheline-at-a-time mode. * * We set up our own source and destination PTEs that we fully control. 
* This is the only way to guarantee that we don't race with another * thread that is modifying the PTE; we can't afford to try the * copy_{to,from}_user() technique of catching the interrupt, since * we must run with interrupts disabled to avoid the risk of some * other code seeing the incoherent data in our cache. (Recall that * our cache is indexed by PA, so even if the other code doesn't use * our kmap_atomic virtual addresses, they'll still hit in cache using * the normal VAs that aren't supposed to hit in cache.) */ static void memcpy_multicache(void *dest, const void *source, pte_t dst_pte, pte_t src_pte, int len) { int idx; unsigned long flags, newsrc, newdst; pmd_t *pmdp; pte_t *ptep; int type0, type1; int cpu = get_cpu(); /* * Disable interrupts so that we don't recurse into memcpy() * in an interrupt handler, nor accidentally reference * the PA of the source from an interrupt routine. Also * notify the simulator that we're playing games so we don't * generate spurious coherency warnings. */ local_irq_save(flags); sim_allow_multiple_caching(1); /* Set up the new dest mapping */ type0 = kmap_atomic_idx_push(); idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0; newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1)); pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst); ptep = pte_offset_kernel(pmdp, newdst); if (pte_val(*ptep) != pte_val(dst_pte)) { set_pte(ptep, dst_pte); local_flush_tlb_page(NULL, newdst, PAGE_SIZE); } /* Set up the new source mapping */ type1 = kmap_atomic_idx_push(); idx += (type0 - type1); src_pte = hv_pte_set_nc(src_pte); src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */ newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1)); pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc); ptep = pte_offset_kernel(pmdp, newsrc); __set_pte(ptep, src_pte); /* set_pte() would be confused by this */ local_flush_tlb_page(NULL, newsrc, PAGE_SIZE); /* Actually move the data. 
*/ __memcpy_asm((void *)newdst, (const void *)newsrc, len); /* * Remap the source as locally-cached and not OLOC'ed so that * we can inval without also invaling the remote cpu's cache. * This also avoids known errata with inv'ing cacheable oloc data. */ src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3); src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */ __set_pte(ptep, src_pte); /* set_pte() would be confused by this */ local_flush_tlb_page(NULL, newsrc, PAGE_SIZE); /* * Do the actual invalidation, covering the full L2 cache line * at the end since __memcpy_asm() is somewhat aggressive. */ __inv_buffer((void *)newsrc, len); /* * We're done: notify the simulator that all is back to normal, * and re-enable interrupts and pre-emption. */ kmap_atomic_idx_pop(); kmap_atomic_idx_pop(); sim_allow_multiple_caching(0); local_irq_restore(flags); put_cpu(); } /* * Identify large copies from remotely-cached memory, and copy them * via memcpy_multicache() if they look good, otherwise fall back * to the particular kind of copying passed as the memcpy_t function. */ static unsigned long fast_copy(void *dest, const void *source, int len, memcpy_t func) { /* * Check if it's big enough to bother with. We may end up doing a * small copy via TLB manipulation if we're near a page boundary, * but presumably we'll make it up when we hit the second page. */ while (len >= LARGE_COPY_CUTOFF) { int copy_size, bytes_left_on_page; pte_t *src_ptep, *dst_ptep; pte_t src_pte, dst_pte; struct page *src_page, *dst_page; /* Is the source page oloc'ed to a remote cpu? 
*/ retry_source: src_ptep = virt_to_pte(current->mm, (unsigned long)source); if (src_ptep == NULL) break; src_pte = *src_ptep; if (!hv_pte_get_present(src_pte) || !hv_pte_get_readable(src_pte) || hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3) break; if (get_remote_cache_cpu(src_pte) == smp_processor_id()) break; src_page = pfn_to_page(hv_pte_get_pfn(src_pte)); get_page(src_page); if (pte_val(src_pte) != pte_val(*src_ptep)) { put_page(src_page); goto retry_source; } if (pte_huge(src_pte)) { /* Adjust the PTE to correspond to a small page */ int pfn = hv_pte_get_pfn(src_pte); pfn += (((unsigned long)source & (HPAGE_SIZE-1)) >> PAGE_SHIFT); src_pte = pfn_pte(pfn, src_pte); src_pte = pte_mksmall(src_pte); } /* Is the destination page writable? */ retry_dest: dst_ptep = virt_to_pte(current->mm, (unsigned long)dest); if (dst_ptep == NULL) { put_page(src_page); break; } dst_pte = *dst_ptep; if (!hv_pte_get_present(dst_pte) || !hv_pte_get_writable(dst_pte)) { put_page(src_page); break; } dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte)); if (dst_page == src_page) { /* * Source and dest are on the same page; this * potentially exposes us to incoherence if any * part of src and dest overlap on a cache line. * Just give up rather than trying to be precise. 
*/ put_page(src_page); break; } get_page(dst_page); if (pte_val(dst_pte) != pte_val(*dst_ptep)) { put_page(dst_page); goto retry_dest; } if (pte_huge(dst_pte)) { /* Adjust the PTE to correspond to a small page */ int pfn = hv_pte_get_pfn(dst_pte); pfn += (((unsigned long)dest & (HPAGE_SIZE-1)) >> PAGE_SHIFT); dst_pte = pfn_pte(pfn, dst_pte); dst_pte = pte_mksmall(dst_pte); } /* All looks good: create a cachable PTE and copy from it */ copy_size = len; bytes_left_on_page = PAGE_SIZE - (((int)source) & (PAGE_SIZE-1)); if (copy_size > bytes_left_on_page) copy_size = bytes_left_on_page; bytes_left_on_page = PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1)); if (copy_size > bytes_left_on_page) copy_size = bytes_left_on_page; memcpy_multicache(dest, source, dst_pte, src_pte, copy_size); /* Release the pages */ put_page(dst_page); put_page(src_page); /* Continue on the next page */ dest += copy_size; source += copy_size; len -= copy_size; } return func(dest, source, len); } void *memcpy(void *to, const void *from, __kernel_size_t n) { if (n < LARGE_COPY_CUTOFF) return (void *)__memcpy_asm(to, from, n); else return (void *)fast_copy(to, from, n, __memcpy_asm); } unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { if (n < LARGE_COPY_CUTOFF) return __copy_to_user_inatomic_asm(to, from, n); else return fast_copy(to, from, n, __copy_to_user_inatomic_asm); } unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { if (n < LARGE_COPY_CUTOFF) return __copy_from_user_inatomic_asm(to, from, n); else return fast_copy(to, from, n, __copy_from_user_inatomic_asm); } unsigned long __copy_from_user_zeroing(void *to, const void __user *from, unsigned long n) { if (n < LARGE_COPY_CUTOFF) return __copy_from_user_zeroing_asm(to, from, n); else return fast_copy(to, from, n, __copy_from_user_zeroing_asm); } #endif /* !CHIP_HAS_COHERENT_LOCAL_CACHE() */
gpl-2.0
nikhil16242/stock-golfu-kenrel
arch/parisc/hpux/fs.c
7666
5154
/* * Implements HPUX syscalls. * * Copyright (C) 1999 Matthew Wilcox <willy with parisc-linux.org> * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> * Copyright (C) 2000 John Marvin <jsm with parisc-linux.org> * Copyright (C) 2000 Philipp Rumpf * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <asm/errno.h> #include <asm/uaccess.h> int hpux_execve(struct pt_regs *regs) { int error; char *filename; filename = getname((const char __user *) regs->gr[26]); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, (const char __user *const __user *) regs->gr[25], (const char __user *const __user *) regs->gr[24], regs); putname(filename); out: return error; } struct hpux_dirent { loff_t d_off; ino_t d_ino; short d_reclen; short d_namlen; char d_name[1]; }; struct getdents_callback { struct hpux_dirent __user *current_dir; struct hpux_dirent __user *previous; int count; int error; }; #define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) static int filldir(void * __buf, const char * name, int namlen, loff_t offset, u64 ino, unsigned d_type) { struct hpux_dirent __user * dirent; struct getdents_callback * 
buf = (struct getdents_callback *) __buf; ino_t d_ino; int reclen = ALIGN(NAME_OFFSET(dirent) + namlen + 1, sizeof(long)); buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > buf->count) return -EINVAL; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return -EOVERFLOW; } dirent = buf->previous; if (dirent) if (put_user(offset, &dirent->d_off)) goto Efault; dirent = buf->current_dir; if (put_user(d_ino, &dirent->d_ino) || put_user(reclen, &dirent->d_reclen) || put_user(namlen, &dirent->d_namlen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; buf->previous = dirent; buf->current_dir = (void __user *)dirent + reclen; buf->count -= reclen; return 0; Efault: buf->error = -EFAULT; return -EFAULT; } #undef NAME_OFFSET int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned int count) { struct file * file; struct hpux_dirent __user * lastdirent; struct getdents_callback buf; int error = -EBADF; file = fget(fd); if (!file) goto out; buf.current_dir = dirent; buf.previous = NULL; buf.count = count; buf.error = 0; error = vfs_readdir(file, filldir, &buf); if (error >= 0) error = buf.error; lastdirent = buf.previous; if (lastdirent) { if (put_user(file->f_pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.count; } fput(file); out: return error; } int hpux_mount(const char *fs, const char *path, int mflag, const char *fstype, const char *dataptr, int datalen) { return -ENOSYS; } static int cp_hpux_stat(struct kstat *stat, struct hpux_stat64 __user *statbuf) { struct hpux_stat64 tmp; /* we probably want a different split here - is hpux 12:20? 
*/ if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev)) return -EOVERFLOW; memset(&tmp, 0, sizeof(tmp)); tmp.st_dev = new_encode_dev(stat->dev); tmp.st_ino = stat->ino; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; tmp.st_uid = stat->uid; tmp.st_gid = stat->gid; tmp.st_rdev = new_encode_dev(stat->rdev); tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_ctime = stat->ctime.tv_sec; tmp.st_blocks = stat->blocks; tmp.st_blksize = stat->blksize; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; } long hpux_stat64(const char __user *filename, struct hpux_stat64 __user *statbuf) { struct kstat stat; int error = vfs_stat(filename, &stat); if (!error) error = cp_hpux_stat(&stat, statbuf); return error; } long hpux_fstat64(unsigned int fd, struct hpux_stat64 __user *statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_hpux_stat(&stat, statbuf); return error; } long hpux_lstat64(const char __user *filename, struct hpux_stat64 __user *statbuf) { struct kstat stat; int error = vfs_lstat(filename, &stat); if (!error) error = cp_hpux_stat(&stat, statbuf); return error; }
gpl-2.0
bilalliberty/android_kernel_HTC_ville_evita
arch/mips/powertv/asic/prealloc-zeus.c
8690
7872
/*
 * Memory pre-allocations for Zeus boxes.
 *
 * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:       Ken Eppinett
 *               David Schleef <ds@schleef.org>
 */

#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>

#include "prealloc.h"

/*
 * DVR_CAPABLE RESOURCES
 *
 * NOTE(review): entries with a 0x00000000 start and the
 * IORESOURCE_PTV_RES_LOEXT flag look like size-only requests whose
 * final address is assigned at runtime, while fixed-address entries
 * describe hard-wired regions -- confirm against the PREALLOC_*
 * definitions in prealloc.h.
 */
struct resource dvr_zeus_resources[] __initdata =
{
	/*
	 * VIDEO1 / LX1
	 */
	/* Delta-Mu 1 image (2MiB) */
	PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 monitor (8KiB) */
	PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
	PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
		IORESOURCE_MEM)

	/*
	 * VIDEO2 / LX2
	 */
	/* Delta-Mu 2 image (2MiB) */
	PREALLOC_NORMAL("ST231bImage", 0x30000000, 0x30200000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 2 monitor (8KiB) */
	PREALLOC_NORMAL("ST231bMonitor", 0x30200000, 0x30202000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
	PREALLOC_NORMAL("MediaMemory2", 0x30202000, 0x32000000-1,
		IORESOURCE_MEM)

	/*
	 * Sysaudio Driver
	 */
	/* DSP code and data images (1MiB) */
	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC CPU PCM buffer (40KiB) */
	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC AUX buffer (16KiB) */
	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC Main buffer (16KiB) */
	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * STAVEM driver/STAPI
	 *
	 * This memory area is used for allocating buffers for Video decoding
	 * purposes.  Allocation/De-allocation within this buffer is managed
	 * by the STAVMEM driver of the STAPI.  They could be Decimated
	 * Picture Buffers, Intermediate Buffers, as deemed necessary for
	 * video decoding purposes, for any video decoders on Zeus.
	 */
	/* 12MiB */
	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * DOCSIS Subsystem
	 */
	/* 7MiB */
	PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)

	/*
	 * GHW HAL Driver
	 */
	/* PowerTV Graphics Heap (14MiB) */
	PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
		IORESOURCE_MEM)

	/*
	 * multi com buffer area
	 */
	/* 128KiB */
	PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
		IORESOURCE_MEM)

	/*
	 * DMA Ring buffer
	 */
	/* 2.5MiB */
	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Display bins buffer for unit0
	 */
	/* 4KiB */
	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Display bins buffer for unit1
	 */
	/* 4KiB */
	PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * ITFS
	 */
	/* 815,104 bytes each for 2 ITFS partitions. */
	PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * AVFS
	 */
	/* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* 4KiB */
	PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * PMEM
	 */
	/* Persistent memory for diagnostics (64KiB) */
	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Smartcard
	 */
	/* Read and write buffers for Internal/External cards (10KiB) */
	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * TFTPBuffer
	 *
	 * This buffer is used in some minimal configurations (e.g. two-way
	 * loader) for storing software images
	 */
	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Add other resources here
	 */

	/*
	 * End of Resource marker
	 */
	{
		.flags = 0,
	},
};

/*
 * NON_DVR_CAPABLE ZEUS RESOURCES
 *
 * Same layout as the DVR list above, minus the second video decoder,
 * the DVR-only buffers, and with a smaller AVMEM/AVFS footprint.
 */
struct resource non_dvr_zeus_resources[] __initdata =
{
	/*
	 * VIDEO1 / LX1
	 */
	/* Delta-Mu 1 image (2MiB) */
	PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 monitor (8KiB) */
	PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
	PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
		IORESOURCE_MEM)

	/*
	 * Sysaudio Driver
	 */
	/* DSP code and data images (1MiB) */
	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC CPU PCM buffer (40KiB) */
	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC AUX buffer (16KiB) */
	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC Main buffer (16KiB) */
	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * STAVEM driver/STAPI
	 */
	/* 6MiB */
	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * DOCSIS Subsystem
	 */
	/* 7MiB */
	PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)

	/*
	 * GHW HAL Driver
	 */
	/* PowerTV Graphics Heap (14MiB) */
	PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
		IORESOURCE_MEM)

	/*
	 * multi com buffer area
	 */
	/* 128KiB */
	PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
		IORESOURCE_MEM)

	/*
	 * DMA Ring buffer
	 */
	/* 2.5MiB */
	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Display bins buffer for unit0
	 */
	/* 4KiB */
	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * AVFS: player HAL memory
	 */
	/* 945K * 3 for playback */
	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * PMEM
	 */
	/* Persistent memory for diagnostics (64KiB) */
	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Smartcard
	 */
	/* Read and write buffers for Internal/External cards (10KiB) */
	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * NAND Flash
	 */
	/* 10KiB */
	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
		IORESOURCE_MEM)

	/*
	 * TFTPBuffer
	 *
	 * This buffer is used in some minimal configurations (e.g. two-way
	 * loader) for storing software images
	 */
	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Add other resources here
	 */

	/*
	 * End of Resource marker
	 */
	{
		.flags = 0,
	},
};
gpl-2.0
bestmjh47/ActiveKernel_M250S-JB
arch/mips/pmc-sierra/msp71xx/msp_prom.c
8690
11614
/*
 * BRIEF MODULE DESCRIPTION
 *    PROM library initialisation code, assuming a version of
 *    pmon is the boot code.
 *
 * Copyright 2000,2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *         ppopov@mvista.com or source@mvista.com
 *
 * This file was derived from Carsten Langgaard's
 * arch/mips/mips-boards/xx files.
 *
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm-generic/sections.h>
#include <asm/page.h>

#include <msp_prom.h>
#include <msp_regs.h>

/* global PROM environment variables and pointers
 * (filled in by the boot monitor before the kernel starts) */
int prom_argc;
char **prom_argv, **prom_envp;
int *prom_vec;

/* debug flag */
int init_debug = 1;

/* memory blocks -- filled in by prom_getmdesc(), consumed by prom_meminit() */
struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];

/* default feature sets -- used when the PROM does not supply FEATURES;
 * each feature is a key character followed by its value character */
static char msp_default_features[] =
#if defined(CONFIG_PMC_MSP4200_EVAL) \
 || defined(CONFIG_PMC_MSP4200_GW)
	"ERER";
#elif defined(CONFIG_PMC_MSP7120_EVAL) \
 || defined(CONFIG_PMC_MSP7120_GW)
	"EMEMSP";
#elif defined(CONFIG_PMC_MSP7120_FPGA)
	"EMEM";
#endif

/* conversion functions */

/* Map an ASCII hex digit (lowercase only) to its value; anything else
 * silently becomes 0. */
static inline unsigned char str2hexnum(unsigned char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return 0; /* foo */
}

/* Parse a '.'- or ':'-separated MAC address string into 6 bytes at @ea.
 * Returns 0 on success, -1 if the string did not contain exactly five
 * separators. */
static inline int str2eaddr(unsigned char *ea, unsigned char *str)
{
	int index = 0;
	unsigned char num = 0;

	while (*str != '\0') {
		if ((*str == '.') || (*str == ':')) {
			ea[index++] = num;
			num = 0;
			str++;
		} else {
			num = num << 4;
			num |= str2hexnum(*str++);
		}
	}

	if (index == 5) {
		/* last group has no trailing separator */
		ea[index++] = num;
		return 0;
	} else
		return -1;
}
EXPORT_SYMBOL(str2eaddr);

/* Parse an unsigned hex string (no 0x prefix, lowercase) into a value. */
static inline unsigned long str2hex(unsigned char *str)
{
	int value = 0;

	while (*str) {
		value = value << 4;
		value |= str2hexnum(*str++);
	}

	return value;
}

/* function to query the system information */
const char *get_system_type(void)
{
#if defined(CONFIG_PMC_MSP4200_EVAL)
	return "PMC-Sierra MSP4200 Eval Board";
#elif defined(CONFIG_PMC_MSP4200_GW)
	return "PMC-Sierra MSP4200 VoIP Gateway";
#elif defined(CONFIG_PMC_MSP7120_EVAL)
	return "PMC-Sierra MSP7120 Eval Board";
#elif defined(CONFIG_PMC_MSP7120_GW)
	return "PMC-Sierra MSP7120 Residential Gateway";
#elif defined(CONFIG_PMC_MSP7120_FPGA)
	return "PMC-Sierra MSP7120 FPGA";
#else
#error "What is the type of *your* MSP?"
#endif
}

/* Look up PROM variable @ethaddr_name and parse it into the 6-byte
 * buffer @ethernet_addr.  Returns 0 on success, -1 if the variable is
 * missing or malformed. */
int get_ethernet_addr(char *ethaddr_name, char *ethernet_addr)
{
	char *ethaddr_str;

	ethaddr_str = prom_getenv(ethaddr_name);
	if (!ethaddr_str) {
		printk(KERN_WARNING "%s not set in boot prom\n",
			ethaddr_name);
		return -1;
	}

	if (str2eaddr(ethernet_addr, ethaddr_str) == -1) {
		printk(KERN_WARNING "%s badly formatted-<%s>\n",
			ethaddr_name, ethaddr_str);
		return -1;
	}

	if (init_debug > 1) {
		int i;
		printk(KERN_DEBUG "get_ethernet_addr: for %s ",
			ethaddr_name);
		/* first 5 bytes with ':' separators, 6th byte after the
		 * loop (i == 5 here, deliberately) */
		for (i = 0; i < 5; i++)
			printk(KERN_DEBUG "%02x:",
				(unsigned char)*(ethernet_addr+i));
		printk(KERN_DEBUG "%02x\n", *(ethernet_addr+i));
	}

	return 0;
}
EXPORT_SYMBOL(get_ethernet_addr);

/* Return the feature string from the PROM, or the per-board default. */
static char *get_features(void)
{
	char *feature = prom_getenv(FEATURES);

	if (feature == NULL) {
		/* default features based on MACHINE_TYPE */
		feature = msp_default_features;
	}

	return feature;
}

/* Scan the feature string (key/value character pairs) for key @c and
 * return its value character, or FEATURE_NOEXIST. */
static char test_feature(char c)
{
	char *feature = get_features();

	while (*feature) {
		if (*feature++ == c)
			return *feature;
		feature++;
	}

	return FEATURE_NOEXIST;
}

/* Device ID: PROM override if set, otherwise the hardware register. */
unsigned long get_deviceid(void)
{
	char *deviceid = prom_getenv(DEVICEID);

	if (deviceid == NULL)
		return *DEV_ID_REG;
	else
		return str2hex(deviceid);
}

char identify_pci(void)
{
	return test_feature(PCI_KEY);
}
EXPORT_SYMBOL(identify_pci);

char identify_pcimux(void)
{
	return test_feature(PCIMUX_KEY);
}

char identify_sec(void)
{
	return test_feature(SEC_KEY);
}
EXPORT_SYMBOL(identify_sec);

char identify_spad(void)
{
	return test_feature(SPAD_KEY);
}
EXPORT_SYMBOL(identify_spad);

char identify_tdm(void)
{
	return test_feature(TDM_KEY);
}
EXPORT_SYMBOL(identify_tdm);

char identify_zsp(void)
{
	return test_feature(ZSP_KEY);
}
EXPORT_SYMBOL(identify_zsp);

/* Like test_feature() but returns the value for the Nth occurrence of
 * @key (one entry per ethernet interface). */
static char identify_enetfeature(char key, unsigned long interface_num)
{
	char *feature = get_features();

	while (*feature) {
		if (*feature++ == key && interface_num-- == 0)
			return *feature;
		feature++;
	}

	return FEATURE_NOEXIST;
}

char identify_enet(unsigned long interface_num)
{
	return identify_enetfeature(ENET_KEY, interface_num);
}
EXPORT_SYMBOL(identify_enet);

char identify_enetTxD(unsigned long interface_num)
{
	return identify_enetfeature(ENETTXD_KEY, interface_num);
}
EXPORT_SYMBOL(identify_enetTxD);

unsigned long identify_family(void)
{
	unsigned long deviceid;

	deviceid = get_deviceid();

	return deviceid & CPU_DEVID_FAMILY;
}
EXPORT_SYMBOL(identify_family);

unsigned long identify_revision(void)
{
	unsigned long deviceid;

	deviceid = get_deviceid();

	return deviceid & CPU_DEVID_REVISION;
}
EXPORT_SYMBOL(identify_revision);

/* PROM environment functions */

char *prom_getenv(char *env_name)
{
	/*
	 * Return a pointer to the given environment variable.  prom_envp
	 * points to a null terminated array of pointers to variables.
	 * Environment variables are stored in the form of "memsize=64"
	 *
	 * NOTE(review): this is a prefix match only -- the '=' is not
	 * checked, so "memsize" would also match a hypothetical
	 * "memsizeX=..." entry.  Harmless for the variable names used
	 * here, but worth confirming if new variables are added.
	 */
	char **var = prom_envp;
	int i = strlen(env_name);

	while (*var) {
		if (strncmp(env_name, *var, i) == 0) {
			return (*var + strlen(env_name) + 1);
		}
		var++;
	}

	return NULL;
}

/* PROM commandline functions */

/* Concatenate prom_argv[1..] (space separated) into arcs_cmdline. */
void  __init prom_init_cmdline(void)
{
	char *cp;
	int actr;

	actr = 1; /* Always ignore argv[0] */

	cp = &(arcs_cmdline[0]);
	while (actr < prom_argc) {
		strcpy(cp, prom_argv[actr]);
		cp += strlen(prom_argv[actr]);
		*cp++ = ' ';
		actr++;
	}
	if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */
		--cp;
	*cp = '\0';
}

/* memory allocation functions */

/* Map a PROM memory-block type onto the kernel boot_mem_map type. */
static int __init prom_memtype_classify(unsigned int type)
{
	switch (type) {
	case yamon_free:
		return BOOT_MEM_RAM;
	case yamon_prom:
		return BOOT_MEM_ROM_DATA;
	default:
		return BOOT_MEM_RESERVED;
	}
}

/* Register every block from prom_getmdesc() with the boot memory map. */
void __init prom_meminit(void)
{
	struct prom_pmemblock *p;

	p = prom_getmdesc();

	while (p->size) {
		long type;
		unsigned long base, size;

		type = prom_memtype_classify(p->type);
		base = p->base;
		size = p->size;

		add_memory_region(base, size, type);
		p++;
	}
}

/*
 * Copy the PMON-provided argv/envp into kernel heap (so the PROM pages
 * can be reclaimed), then free all BOOT_MEM_ROM_DATA regions.
 *
 * NOTE(review): the kmalloc() results are used without a NULL check;
 * at this early boot stage an allocation failure would oops.  Layout:
 * pointer array first, then the strings packed immediately after it.
 */
void __init prom_free_prom_memory(void)
{
	int argc;
	char **argv;
	char **envp;
	char *ptr;
	int len = 0;
	int i;
	unsigned long addr;

	/*
	 * preserve environment variables and command line from pmon/bbload
	 * first preserve the command line
	 */
	for (argc = 0; argc < prom_argc; argc++) {
		len += sizeof(char *);			/* length of pointer */
		len += strlen(prom_argv[argc]) + 1;	/* length of string */
	}
	len += sizeof(char *);		/* plus length of null pointer */

	argv = kmalloc(len, GFP_KERNEL);
	ptr = (char *) &argv[prom_argc + 1];	/* strings follow array */

	for (argc = 0; argc < prom_argc; argc++) {
		argv[argc] = ptr;
		strcpy(ptr, prom_argv[argc]);
		ptr += strlen(prom_argv[argc]) + 1;
	}
	argv[prom_argc] = NULL;		/* end array with null pointer */
	prom_argv = argv;

	/* next preserve the environment variables */
	len = 0;
	i = 0;
	for (envp = prom_envp; *envp != NULL; envp++) {
		i++;		/* count number of environment variables */
		len += sizeof(char *);		/* length of pointer */
		len += strlen(*envp) + 1;	/* length of string */
	}
	len += sizeof(char *);	/* plus length of null pointer */

	envp = kmalloc(len, GFP_KERNEL);
	ptr = (char *) &envp[i+1];

	for (argc = 0; argc < i; argc++) {
		envp[argc] = ptr;
		strcpy(ptr, prom_envp[argc]);
		ptr += strlen(prom_envp[argc]) + 1;
	}
	envp[i] = NULL;		/* end array with null pointer */
	prom_envp = envp;

	/* now the PROM's own data pages can be released */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
			continue;

		addr = boot_mem_map.map[i].addr;
		free_init_pages("prom memory",
				addr, addr + boot_mem_map.map[i].size);
	}
}

/*
 * Build the static mdesc[] memory map from the PROM's "memsize" and
 * "heaptop" variables (with defaults when unset/unparsable):
 * reserved low vectors, PMON data, pre-kernel gap, the kernel image,
 * and the remaining RAM up to memsize.
 */
struct prom_pmemblock *__init prom_getmdesc(void)
{
	static char memsz_env[] __initdata = "memsize";
	static char heaptop_env[] __initdata = "heaptop";
	char *str;
	unsigned int memsize;
	unsigned int heaptop;
	int i;

	str = prom_getenv(memsz_env);
	if (!str) {
		ppfinit("memsize not set in boot prom, "
			"set to default (32Mb)\n");
		memsize = 0x02000000;
	} else {
		memsize = simple_strtol(str, NULL, 0);

		if (memsize == 0) {
			/* if memsize is a bad size, use reasonable default */
			memsize = 0x02000000;
		}

		/* convert to physical address (removing caching bits, etc) */
		memsize = CPHYSADDR(memsize);
	}

	str = prom_getenv(heaptop_env);
	if (!str) {
		heaptop = CPHYSADDR((u32)&_text);
		ppfinit("heaptop not set in boot prom, "
			"set to default 0x%08x\n", heaptop);
	} else {
		/* try plain hex first, then strtol's auto base (0xValue) */
		heaptop = simple_strtol(str, NULL, 16);
		if (heaptop == 0) {
			/* heaptop conversion bad, might have 0xValue */
			heaptop = simple_strtol(str, NULL, 0);

			if (heaptop == 0) {
				/* heaptop still bad, use reasonable default */
				heaptop = CPHYSADDR((u32)&_text);
			}
		}

		/* convert to physical address (removing caching bits, etc) */
		heaptop = CPHYSADDR((u32)heaptop);
	}

	/* the base region */
	i = 0;
	mdesc[i].type = BOOT_MEM_RESERVED;
	mdesc[i].base = 0x00000000;
	mdesc[i].size = PAGE_ALIGN(0x300 + 0x80);
		/* jtag interrupt vector + sizeof vector */

	/* PMON data */
	if (heaptop > mdesc[i].base + mdesc[i].size) {
		i++;			/* 1 */
		mdesc[i].type = BOOT_MEM_ROM_DATA;
		mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size;
		mdesc[i].size = heaptop - mdesc[i].base;
	}

	/* end of PMON data to start of kernel -- probably zero .. */
	if (heaptop != CPHYSADDR((u32)_text)) {
		i++;	/* 2 */
		mdesc[i].type = BOOT_MEM_RAM;
		mdesc[i].base = heaptop;
		mdesc[i].size = CPHYSADDR((u32)_text) - mdesc[i].base;
	}

	/* kernel proper */
	i++;	/* 3 */
	mdesc[i].type = BOOT_MEM_RESERVED;
	mdesc[i].base = CPHYSADDR((u32)_text);
	mdesc[i].size = CPHYSADDR(PAGE_ALIGN((u32)_end)) - mdesc[i].base;

	/* Remainder of RAM -- under memsize */
	i++;	/* 5 */
	mdesc[i].type = yamon_free;
	mdesc[i].base = mdesc[i-1].base + mdesc[i-1].size;
	mdesc[i].size = memsize - mdesc[i].base;

	return &mdesc[0];
}
gpl-2.0
AICP/kernel_motorola_msm8226
arch/alpha/lib/csum_partial_copy.c
11506
8909
/* * csum_partial_copy - do IP checksumming and copy * * (C) Copyright 1996 Linus Torvalds * accelerated versions (and 21264 assembly versions ) contributed by * Rick Gorton <rick.gorton@alpha-processor.com> * * Don't look at this too closely - you'll go mad. The things * we do for performance.. */ #include <linux/types.h> #include <linux/string.h> #include <asm/uaccess.h> #define ldq_u(x,y) \ __asm__ __volatile__("ldq_u %0,%1":"=r" (x):"m" (*(const unsigned long *)(y))) #define stq_u(x,y) \ __asm__ __volatile__("stq_u %1,%0":"=m" (*(unsigned long *)(y)):"r" (x)) #define extql(x,y,z) \ __asm__ __volatile__("extql %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define extqh(x,y,z) \ __asm__ __volatile__("extqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define mskql(x,y,z) \ __asm__ __volatile__("mskql %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define mskqh(x,y,z) \ __asm__ __volatile__("mskqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define insql(x,y,z) \ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define insqh(x,y,z) \ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define __get_user_u(x,ptr) \ ({ \ long __guu_err; \ __asm__ __volatile__( \ "1: ldq_u %0,%2\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ " .long 1b - .\n" \ " lda %0,2b-1b(%1)\n" \ ".previous" \ : "=r"(x), "=r"(__guu_err) \ : "m"(__m(ptr)), "1"(0)); \ __guu_err; \ }) #define __put_user_u(x,ptr) \ ({ \ long __puu_err; \ __asm__ __volatile__( \ "1: stq_u %2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ " .long 1b - ." \ " lda $31,2b-1b(%0)\n" \ ".previous" \ : "=r"(__puu_err) \ : "m"(__m(addr)), "rJ"(x), "0"(0)); \ __puu_err; \ }) static inline unsigned short from64to16(unsigned long x) { /* Using extract instructions is a bit more efficient than the original shift/bitmask version. 
*/ union { unsigned long ul; unsigned int ui[2]; unsigned short us[4]; } in_v, tmp_v, out_v; in_v.ul = x; tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1]; /* Since the bits of tmp_v.sh[3] are going to always be zero, we don't have to bother to add that in. */ out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1] + (unsigned long) tmp_v.us[2]; /* Similarly, out_v.us[2] is always zero for the final add. */ return out_v.us[0] + out_v.us[1]; } /* * Ok. This isn't fun, but this is the EASY case. */ static inline unsigned long csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, long len, unsigned long checksum, int *errp) { unsigned long carry = 0; int err = 0; while (len >= 0) { unsigned long word; err |= __get_user(word, src); checksum += carry; src++; checksum += word; len -= 8; carry = checksum < word; *dst = word; dst++; } len += 8; checksum += carry; if (len) { unsigned long word, tmp; err |= __get_user(word, src); tmp = *dst; mskql(word, len, word); checksum += word; mskqh(tmp, len, tmp); carry = checksum < word; *dst = word | tmp; checksum += carry; } if (err) *errp = err; return checksum; } /* * This is even less fun, but this is still reasonably * easy. 
*/ static inline unsigned long csum_partial_cfu_dest_aligned(const unsigned long __user *src, unsigned long *dst, unsigned long soff, long len, unsigned long checksum, int *errp) { unsigned long first; unsigned long word, carry; unsigned long lastsrc = 7+len+(unsigned long)src; int err = 0; err |= __get_user_u(first,src); carry = 0; while (len >= 0) { unsigned long second; err |= __get_user_u(second, src+1); extql(first, soff, word); len -= 8; src++; extqh(second, soff, first); checksum += carry; word |= first; first = second; checksum += word; *dst = word; dst++; carry = checksum < word; } len += 8; checksum += carry; if (len) { unsigned long tmp; unsigned long second; err |= __get_user_u(second, lastsrc); tmp = *dst; extql(first, soff, word); extqh(second, soff, first); word |= first; mskql(word, len, word); checksum += word; mskqh(tmp, len, tmp); carry = checksum < word; *dst = word | tmp; checksum += carry; } if (err) *errp = err; return checksum; } /* * This is slightly less fun than the above.. 
*/ static inline unsigned long csum_partial_cfu_src_aligned(const unsigned long __user *src, unsigned long *dst, unsigned long doff, long len, unsigned long checksum, unsigned long partial_dest, int *errp) { unsigned long carry = 0; unsigned long word; unsigned long second_dest; int err = 0; mskql(partial_dest, doff, partial_dest); while (len >= 0) { err |= __get_user(word, src); len -= 8; insql(word, doff, second_dest); checksum += carry; stq_u(partial_dest | second_dest, dst); src++; checksum += word; insqh(word, doff, partial_dest); carry = checksum < word; dst++; } len += 8; if (len) { checksum += carry; err |= __get_user(word, src); mskql(word, len, word); len -= 8; checksum += word; insql(word, doff, second_dest); len += doff; carry = checksum < word; partial_dest |= second_dest; if (len >= 0) { stq_u(partial_dest, dst); if (!len) goto out; dst++; insqh(word, doff, partial_dest); } doff = len; } ldq_u(second_dest, dst); mskqh(second_dest, doff, second_dest); stq_u(partial_dest | second_dest, dst); out: checksum += carry; if (err) *errp = err; return checksum; } /* * This is so totally un-fun that it's frightening. Don't * look at this too closely, you'll go blind. 
*/ static inline unsigned long csum_partial_cfu_unaligned(const unsigned long __user * src, unsigned long * dst, unsigned long soff, unsigned long doff, long len, unsigned long checksum, unsigned long partial_dest, int *errp) { unsigned long carry = 0; unsigned long first; unsigned long lastsrc; int err = 0; err |= __get_user_u(first, src); lastsrc = 7+len+(unsigned long)src; mskql(partial_dest, doff, partial_dest); while (len >= 0) { unsigned long second, word; unsigned long second_dest; err |= __get_user_u(second, src+1); extql(first, soff, word); checksum += carry; len -= 8; extqh(second, soff, first); src++; word |= first; first = second; insql(word, doff, second_dest); checksum += word; stq_u(partial_dest | second_dest, dst); carry = checksum < word; insqh(word, doff, partial_dest); dst++; } len += doff; checksum += carry; if (len >= 0) { unsigned long second, word; unsigned long second_dest; err |= __get_user_u(second, lastsrc); extql(first, soff, word); extqh(second, soff, first); word |= first; first = second; mskql(word, len-doff, word); checksum += word; insql(word, doff, second_dest); carry = checksum < word; stq_u(partial_dest | second_dest, dst); if (len) { ldq_u(second_dest, dst+1); insqh(word, doff, partial_dest); mskqh(second_dest, len, second_dest); stq_u(partial_dest | second_dest, dst+1); } checksum += carry; } else { unsigned long second, word; unsigned long second_dest; err |= __get_user_u(second, lastsrc); extql(first, soff, word); extqh(second, soff, first); word |= first; ldq_u(second_dest, dst); mskql(word, len-doff, word); checksum += word; mskqh(second_dest, len, second_dest); carry = checksum < word; insql(word, doff, word); stq_u(partial_dest | word | second_dest, dst); checksum += carry; } if (err) *errp = err; return checksum; } __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp) { unsigned long checksum = (__force u32) sum; unsigned long soff = 7 & (unsigned long) src; unsigned long 
doff = 7 & (unsigned long) dst; if (len) { if (!doff) { if (!soff) checksum = csum_partial_cfu_aligned( (const unsigned long __user *) src, (unsigned long *) dst, len-8, checksum, errp); else checksum = csum_partial_cfu_dest_aligned( (const unsigned long __user *) src, (unsigned long *) dst, soff, len-8, checksum, errp); } else { unsigned long partial_dest; ldq_u(partial_dest, dst); if (!soff) checksum = csum_partial_cfu_src_aligned( (const unsigned long __user *) src, (unsigned long *) dst, doff, len-8, checksum, partial_dest, errp); else checksum = csum_partial_cfu_unaligned( (const unsigned long __user *) src, (unsigned long *) dst, soff, doff, len-8, checksum, partial_dest, errp); } checksum = from64to16 (checksum); } return (__force __wsum)checksum; } __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { return csum_partial_copy_from_user((__force const void __user *)src, dst, len, sum, NULL); }
gpl-2.0
abhijeet-dev/linux-samsung
arch/arm/common/locomo.c
12018
24146
/* * linux/arch/arm/common/locomo.c * * Sharp LoCoMo support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains all generic LoCoMo support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. * * Based on sa1111.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/hardware/locomo.h> /* LoCoMo Interrupts */ #define IRQ_LOCOMO_KEY (0) #define IRQ_LOCOMO_GPIO (1) #define IRQ_LOCOMO_LT (2) #define IRQ_LOCOMO_SPI (3) /* M62332 output channel selection */ #define M62332_EVR_CH 1 /* M62332 volume channel number */ /* 0 : CH.1 , 1 : CH. 
2 */ /* DAC send data */ #define M62332_SLAVE_ADDR 0x4e /* Slave address */ #define M62332_W_BIT 0x00 /* W bit (0 only) */ #define M62332_SUB_ADDR 0x00 /* Sub address */ #define M62332_A_BIT 0x00 /* A bit (0 only) */ /* DAC setup and hold times (expressed in us) */ #define DAC_BUS_FREE_TIME 5 /* 4.7 us */ #define DAC_START_SETUP_TIME 5 /* 4.7 us */ #define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ #define DAC_START_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ #define DAC_DATA_SETUP_TIME 1 /* 250 ns */ #define DAC_DATA_HOLD_TIME 1 /* 300 ns */ #define DAC_LOW_SETUP_TIME 1 /* 300 ns */ #define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ /* the following is the overall data for the locomo chip */ struct locomo { struct device *dev; unsigned long phys; unsigned int irq; int irq_base; spinlock_t lock; void __iomem *base; #ifdef CONFIG_PM void *saved_state; #endif }; struct locomo_dev_info { unsigned long offset; unsigned long length; unsigned int devid; unsigned int irq[1]; const char * name; }; /* All the locomo devices. If offset is non-zero, the mapbase for the * locomo_dev will be set to the chip base plus offset. If offset is * zero, then the mapbase for the locomo_dev will be set to zero. 
An * offset of zero means the device only uses GPIOs or other helper * functions inside this file */ static struct locomo_dev_info locomo_devices[] = { { .devid = LOCOMO_DEVID_KEYBOARD, .irq = { IRQ_LOCOMO_KEY }, .name = "locomo-keyboard", .offset = LOCOMO_KEYBOARD, .length = 16, }, { .devid = LOCOMO_DEVID_FRONTLIGHT, .irq = {}, .name = "locomo-frontlight", .offset = LOCOMO_FRONTLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_BACKLIGHT, .irq = {}, .name = "locomo-backlight", .offset = LOCOMO_BACKLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_AUDIO, .irq = {}, .name = "locomo-audio", .offset = LOCOMO_AUDIO, .length = 4, }, { .devid = LOCOMO_DEVID_LED, .irq = {}, .name = "locomo-led", .offset = LOCOMO_LED, .length = 8, }, { .devid = LOCOMO_DEVID_UART, .irq = {}, .name = "locomo-uart", .offset = 0, .length = 0, }, { .devid = LOCOMO_DEVID_SPI, .irq = {}, .name = "locomo-spi", .offset = LOCOMO_SPI, .length = 0x30, }, }; static void locomo_handler(unsigned int irq, struct irq_desc *desc) { struct locomo *lchip = irq_get_chip_data(irq); int req, i; /* Acknowledge the parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* check why this interrupt was generated */ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { if (req & (0x0100 << i)) { generic_handle_irq(irq); } } } } static void locomo_ack_irq(struct irq_data *d) { } static void locomo_mask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r &= ~(0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static void locomo_unmask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r |= (0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static struct irq_chip 
locomo_chip = { .name = "LOCOMO", .irq_ack = locomo_ack_irq, .irq_mask = locomo_mask_irq, .irq_unmask = locomo_unmask_irq, }; static void locomo_setup_irq(struct locomo *lchip) { int irq = lchip->irq_base; /* * Install handler for IRQ_LOCOMO_HW. */ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); irq_set_chip_data(lchip->irq, lchip); irq_set_chained_handler(lchip->irq, locomo_handler); /* Install handlers for IRQ_LOCOMO_* */ for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } } static void locomo_dev_release(struct device *_dev) { struct locomo_dev *dev = LOCOMO_DEV(_dev); kfree(dev); } static int locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) { struct locomo_dev *dev; int ret; dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto out; } /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. */ if (lchip->dev->dma_mask) { dev->dma_mask = *lchip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; } dev_set_name(&dev->dev, "%s", info->name); dev->devid = info->devid; dev->dev.parent = lchip->dev; dev->dev.bus = &locomo_bus_type; dev->dev.release = locomo_dev_release; dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; if (info->offset) dev->mapbase = lchip->base + info->offset; else dev->mapbase = 0; dev->length = info->length; dev->irq[0] = (lchip->irq_base == NO_IRQ) ? 
NO_IRQ : lchip->irq_base + info->irq[0]; ret = device_register(&dev->dev); if (ret) { out: kfree(dev); } return ret; } #ifdef CONFIG_PM struct locomo_save_data { u16 LCM_GPO; u16 LCM_SPICT; u16 LCM_GPE; u16 LCM_ASD; u16 LCM_SPIMD; }; static int locomo_suspend(struct platform_device *dev, pm_message_t state) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long flags; save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); if (!save) return -ENOMEM; lchip->saved_state = save; spin_lock_irqsave(&lchip->lock, flags); save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPO); save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPE); save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ locomo_writel(0x00, lchip->base + LOCOMO_ASD); save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_PAIF); locomo_writel(0x00, lchip->base + LOCOMO_DAC); locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ else /* 18MHz already enabled, so no wait */ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ spin_unlock_irqrestore(&lchip->lock, flags); return 0; } static int locomo_resume(struct 
platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long r; unsigned long flags; save = lchip->saved_state; if (!save) return 0; spin_lock_irqsave(&lchip->lock, flags); locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_C32K); locomo_writel(0x90, lchip->base + LOCOMO_TADC); locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); r &= 0xFEFF; locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); spin_unlock_irqrestore(&lchip->lock, flags); lchip->saved_state = NULL; kfree(save); return 0; } #endif /** * locomo_probe - probe for a single LoCoMo chip. * @phys_addr: physical address of device. * * Probe for a LoCoMo chip. This must be called * before any other locomo-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. * %0 successful. */ static int __locomo_probe(struct device *me, struct resource *mem, int irq) { struct locomo_platform_data *pdata = me->platform_data; struct locomo *lchip; unsigned long r; int i, ret = -ENODEV; lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); if (!lchip) return -ENOMEM; spin_lock_init(&lchip->lock); lchip->dev = me; dev_set_drvdata(lchip->dev, lchip); lchip->phys = mem->start; lchip->irq = irq; lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; /* * Map the whole region. This also maps the * registers for our children. 
*/ lchip->base = ioremap(mem->start, PAGE_SIZE); if (!lchip->base) { ret = -ENOMEM; goto out; } /* locomo initialize */ locomo_writel(0, lchip->base + LOCOMO_ICR); /* KEYBOARD */ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); /* GPIO */ locomo_writel(0, lchip->base + LOCOMO_GPO); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPE); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPD); locomo_writel(0, lchip->base + LOCOMO_GIE); /* Frontlight */ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); /* Longtime timer */ locomo_writel(0, lchip->base + LOCOMO_LTINT); /* SPI */ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); r = locomo_readl(lchip->base + LOCOMO_ASD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_ASD); locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); r = locomo_readl(lchip->base + LOCOMO_HSD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_HSD); locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); /* XON */ locomo_writel(0x80, lchip->base + LOCOMO_TADC); udelay(1000); /* CLK9MEN */ r = locomo_readl(lchip->base + LOCOMO_TADC); r |= 0x10; locomo_writel(r, lchip->base + LOCOMO_TADC); udelay(100); /* init DAC */ r = locomo_readl(lchip->base + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, lchip->base + LOCOMO_DAC); r = locomo_readl(lchip->base + LOCOMO_VER); printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. 
*/ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) locomo_setup_irq(lchip); for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) locomo_init_one_child(lchip, &locomo_devices[i]); return 0; out: kfree(lchip); return ret; } static int locomo_remove_child(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __locomo_remove(struct locomo *lchip) { device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { irq_set_chained_handler(lchip->irq, NULL); irq_set_handler_data(lchip->irq, NULL); } iounmap(lchip->base); kfree(lchip); } static int locomo_probe(struct platform_device *dev) { struct resource *mem; int irq; mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(dev, 0); if (irq < 0) return -ENXIO; return __locomo_probe(&dev->dev, mem, irq); } static int locomo_remove(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); if (lchip) { __locomo_remove(lchip); platform_set_drvdata(dev, NULL); } return 0; } /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. 
*/ static struct platform_driver locomo_device_driver = { .probe = locomo_probe, .remove = locomo_remove, #ifdef CONFIG_PM .suspend = locomo_suspend, .resume = locomo_resume, #endif .driver = { .name = "locomo", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) { return (struct locomo *)dev_get_drvdata(ldev->dev.parent); } void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPD); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPD); r = locomo_readl(lchip->base + LOCOMO_GPE); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPE); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_set_dir); int locomo_gpio_read_level(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPL); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_level); int locomo_gpio_read_output(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_output); void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = 
locomo_readl(lchip->base + LOCOMO_GPO); if (set) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_write); static void locomo_m62332_sendbit(void *mapbase, int bit) { unsigned int r; r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ if (bit & 1) { r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ } else { r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ } udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ } void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) { struct locomo *lchip = locomo_chip_driver(ldev); int i; unsigned char data; unsigned int r; void *mapbase = lchip->base; unsigned long flags; spin_lock_irqsave(&lchip->lock, flags); /* Start */ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ /* Send slave address and W 
bit (LSB is W bit) */ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); goto out; } /* Send Sub address (LSB is channel select) */ /* channel = 0 : ch1 select */ /* = 1 : ch2 select */ data = M62332_SUB_ADDR + channel; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); goto out; } /* Send DAC data */ for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + 
LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); } out: /* stop */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_m62332_senddata); /* * Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) { unsigned long flags; struct locomo *lchip = locomo_chip_driver(dev); if (vr) locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); else locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); spin_lock_irqsave(&lchip->lock, flags); locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); udelay(100); locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); 
locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_frontlight_set); /* * LoCoMo "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. */ static int locomo_match(struct device *_dev, struct device_driver *_drv) { struct locomo_dev *dev = LOCOMO_DEV(_dev); struct locomo_driver *drv = LOCOMO_DRV(_drv); return dev->devid == drv->devid; } static int locomo_bus_suspend(struct device *dev, pm_message_t state) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->suspend) ret = drv->suspend(ldev, state); return ret; } static int locomo_bus_resume(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->resume) ret = drv->resume(ldev); return ret; } static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(ldev); return ret; } static int locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(ldev); return ret; } struct bus_type locomo_bus_type = { .name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, .suspend = locomo_bus_suspend, .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) { driver->drv.bus = &locomo_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(locomo_driver_register); void locomo_driver_unregister(struct locomo_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(locomo_driver_unregister); static int __init locomo_init(void) { int ret = bus_register(&locomo_bus_type); if (ret == 
0) platform_driver_register(&locomo_device_driver); return ret; } static void __exit locomo_exit(void) { platform_driver_unregister(&locomo_device_driver); bus_unregister(&locomo_bus_type); } module_init(locomo_init); module_exit(locomo_exit); MODULE_DESCRIPTION("Sharp LoCoMo core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
gpl-2.0
nyterage/Galaxy_Tab_3_217s
arch/arm/common/locomo.c
12018
24146
/* * linux/arch/arm/common/locomo.c * * Sharp LoCoMo support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains all generic LoCoMo support. * * All initialization functions provided here are intended to be called * from machine specific code with proper arguments when required. * * Based on sa1111.c */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/irq.h> #include <asm/hardware/locomo.h> /* LoCoMo Interrupts */ #define IRQ_LOCOMO_KEY (0) #define IRQ_LOCOMO_GPIO (1) #define IRQ_LOCOMO_LT (2) #define IRQ_LOCOMO_SPI (3) /* M62332 output channel selection */ #define M62332_EVR_CH 1 /* M62332 volume channel number */ /* 0 : CH.1 , 1 : CH. 
2 */ /* DAC send data */ #define M62332_SLAVE_ADDR 0x4e /* Slave address */ #define M62332_W_BIT 0x00 /* W bit (0 only) */ #define M62332_SUB_ADDR 0x00 /* Sub address */ #define M62332_A_BIT 0x00 /* A bit (0 only) */ /* DAC setup and hold times (expressed in us) */ #define DAC_BUS_FREE_TIME 5 /* 4.7 us */ #define DAC_START_SETUP_TIME 5 /* 4.7 us */ #define DAC_STOP_SETUP_TIME 4 /* 4.0 us */ #define DAC_START_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */ #define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */ #define DAC_DATA_SETUP_TIME 1 /* 250 ns */ #define DAC_DATA_HOLD_TIME 1 /* 300 ns */ #define DAC_LOW_SETUP_TIME 1 /* 300 ns */ #define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */ /* the following is the overall data for the locomo chip */ struct locomo { struct device *dev; unsigned long phys; unsigned int irq; int irq_base; spinlock_t lock; void __iomem *base; #ifdef CONFIG_PM void *saved_state; #endif }; struct locomo_dev_info { unsigned long offset; unsigned long length; unsigned int devid; unsigned int irq[1]; const char * name; }; /* All the locomo devices. If offset is non-zero, the mapbase for the * locomo_dev will be set to the chip base plus offset. If offset is * zero, then the mapbase for the locomo_dev will be set to zero. 
An * offset of zero means the device only uses GPIOs or other helper * functions inside this file */ static struct locomo_dev_info locomo_devices[] = { { .devid = LOCOMO_DEVID_KEYBOARD, .irq = { IRQ_LOCOMO_KEY }, .name = "locomo-keyboard", .offset = LOCOMO_KEYBOARD, .length = 16, }, { .devid = LOCOMO_DEVID_FRONTLIGHT, .irq = {}, .name = "locomo-frontlight", .offset = LOCOMO_FRONTLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_BACKLIGHT, .irq = {}, .name = "locomo-backlight", .offset = LOCOMO_BACKLIGHT, .length = 8, }, { .devid = LOCOMO_DEVID_AUDIO, .irq = {}, .name = "locomo-audio", .offset = LOCOMO_AUDIO, .length = 4, }, { .devid = LOCOMO_DEVID_LED, .irq = {}, .name = "locomo-led", .offset = LOCOMO_LED, .length = 8, }, { .devid = LOCOMO_DEVID_UART, .irq = {}, .name = "locomo-uart", .offset = 0, .length = 0, }, { .devid = LOCOMO_DEVID_SPI, .irq = {}, .name = "locomo-spi", .offset = LOCOMO_SPI, .length = 0x30, }, }; static void locomo_handler(unsigned int irq, struct irq_desc *desc) { struct locomo *lchip = irq_get_chip_data(irq); int req, i; /* Acknowledge the parent IRQ */ desc->irq_data.chip->irq_ack(&desc->irq_data); /* check why this interrupt was generated */ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00; if (req) { /* generate the next interrupt(s) */ irq = lchip->irq_base; for (i = 0; i <= 3; i++, irq++) { if (req & (0x0100 << i)) { generic_handle_irq(irq); } } } } static void locomo_ack_irq(struct irq_data *d) { } static void locomo_mask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r &= ~(0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static void locomo_unmask_irq(struct irq_data *d) { struct locomo *lchip = irq_data_get_irq_chip_data(d); unsigned int r; r = locomo_readl(lchip->base + LOCOMO_ICR); r |= (0x0010 << (d->irq - lchip->irq_base)); locomo_writel(r, lchip->base + LOCOMO_ICR); } static struct irq_chip 
locomo_chip = { .name = "LOCOMO", .irq_ack = locomo_ack_irq, .irq_mask = locomo_mask_irq, .irq_unmask = locomo_unmask_irq, }; static void locomo_setup_irq(struct locomo *lchip) { int irq = lchip->irq_base; /* * Install handler for IRQ_LOCOMO_HW. */ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING); irq_set_chip_data(lchip->irq, lchip); irq_set_chained_handler(lchip->irq, locomo_handler); /* Install handlers for IRQ_LOCOMO_* */ for ( ; irq <= lchip->irq_base + 3; irq++) { irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq); irq_set_chip_data(irq, lchip); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } } static void locomo_dev_release(struct device *_dev) { struct locomo_dev *dev = LOCOMO_DEV(_dev); kfree(dev); } static int locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info) { struct locomo_dev *dev; int ret; dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto out; } /* * If the parent device has a DMA mask associated with it, * propagate it down to the children. */ if (lchip->dev->dma_mask) { dev->dma_mask = *lchip->dev->dma_mask; dev->dev.dma_mask = &dev->dma_mask; } dev_set_name(&dev->dev, "%s", info->name); dev->devid = info->devid; dev->dev.parent = lchip->dev; dev->dev.bus = &locomo_bus_type; dev->dev.release = locomo_dev_release; dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; if (info->offset) dev->mapbase = lchip->base + info->offset; else dev->mapbase = 0; dev->length = info->length; dev->irq[0] = (lchip->irq_base == NO_IRQ) ? 
NO_IRQ : lchip->irq_base + info->irq[0]; ret = device_register(&dev->dev); if (ret) { out: kfree(dev); } return ret; } #ifdef CONFIG_PM struct locomo_save_data { u16 LCM_GPO; u16 LCM_SPICT; u16 LCM_GPE; u16 LCM_ASD; u16 LCM_SPIMD; }; static int locomo_suspend(struct platform_device *dev, pm_message_t state) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long flags; save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL); if (!save) return -ENOMEM; lchip->saved_state = save; spin_lock_irqsave(&lchip->lock, flags); save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPO); save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */ locomo_writel(0x00, lchip->base + LOCOMO_GPE); save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */ locomo_writel(0x00, lchip->base + LOCOMO_ASD); save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_PAIF); locomo_writel(0x00, lchip->base + LOCOMO_DAC); locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC); if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88)) locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */ else /* 18MHz already enabled, so no wait */ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */ spin_unlock_irqrestore(&lchip->lock, flags); return 0; } static int locomo_resume(struct 
platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); struct locomo_save_data *save; unsigned long r; unsigned long flags; save = lchip->saved_state; if (!save) return 0; spin_lock_irqsave(&lchip->lock, flags); locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO); locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT); locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE); locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD); locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); locomo_writel(0x00, lchip->base + LOCOMO_C32K); locomo_writel(0x90, lchip->base + LOCOMO_TADC); locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC); r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); r &= 0xFEFF; locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD); spin_unlock_irqrestore(&lchip->lock, flags); lchip->saved_state = NULL; kfree(save); return 0; } #endif /** * locomo_probe - probe for a single LoCoMo chip. * @phys_addr: physical address of device. * * Probe for a LoCoMo chip. This must be called * before any other locomo-specific code. * * Returns: * %-ENODEV device not found. * %-EBUSY physical address already marked in-use. * %0 successful. */ static int __locomo_probe(struct device *me, struct resource *mem, int irq) { struct locomo_platform_data *pdata = me->platform_data; struct locomo *lchip; unsigned long r; int i, ret = -ENODEV; lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL); if (!lchip) return -ENOMEM; spin_lock_init(&lchip->lock); lchip->dev = me; dev_set_drvdata(lchip->dev, lchip); lchip->phys = mem->start; lchip->irq = irq; lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ; /* * Map the whole region. This also maps the * registers for our children. 
*/ lchip->base = ioremap(mem->start, PAGE_SIZE); if (!lchip->base) { ret = -ENOMEM; goto out; } /* locomo initialize */ locomo_writel(0, lchip->base + LOCOMO_ICR); /* KEYBOARD */ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC); /* GPIO */ locomo_writel(0, lchip->base + LOCOMO_GPO); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPE); locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) , lchip->base + LOCOMO_GPD); locomo_writel(0, lchip->base + LOCOMO_GIE); /* Frontlight */ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); /* Longtime timer */ locomo_writel(0, lchip->base + LOCOMO_LTINT); /* SPI */ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE); locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); r = locomo_readl(lchip->base + LOCOMO_ASD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_ASD); locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); r = locomo_readl(lchip->base + LOCOMO_HSD); r |= 0x8000; locomo_writel(r, lchip->base + LOCOMO_HSD); locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); /* XON */ locomo_writel(0x80, lchip->base + LOCOMO_TADC); udelay(1000); /* CLK9MEN */ r = locomo_readl(lchip->base + LOCOMO_TADC); r |= 0x10; locomo_writel(r, lchip->base + LOCOMO_TADC); udelay(100); /* init DAC */ r = locomo_readl(lchip->base + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, lchip->base + LOCOMO_DAC); r = locomo_readl(lchip->base + LOCOMO_VER); printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); /* * The interrupt controller must be initialised before any * other device to ensure that the interrupts are available. 
*/ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ) locomo_setup_irq(lchip); for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) locomo_init_one_child(lchip, &locomo_devices[i]); return 0; out: kfree(lchip); return ret; } static int locomo_remove_child(struct device *dev, void *data) { device_unregister(dev); return 0; } static void __locomo_remove(struct locomo *lchip) { device_for_each_child(lchip->dev, NULL, locomo_remove_child); if (lchip->irq != NO_IRQ) { irq_set_chained_handler(lchip->irq, NULL); irq_set_handler_data(lchip->irq, NULL); } iounmap(lchip->base); kfree(lchip); } static int locomo_probe(struct platform_device *dev) { struct resource *mem; int irq; mem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!mem) return -EINVAL; irq = platform_get_irq(dev, 0); if (irq < 0) return -ENXIO; return __locomo_probe(&dev->dev, mem, irq); } static int locomo_remove(struct platform_device *dev) { struct locomo *lchip = platform_get_drvdata(dev); if (lchip) { __locomo_remove(lchip); platform_set_drvdata(dev, NULL); } return 0; } /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at * the per-machine level, and then have this driver pick * up the registered devices. 
*/ static struct platform_driver locomo_device_driver = { .probe = locomo_probe, .remove = locomo_remove, #ifdef CONFIG_PM .suspend = locomo_suspend, .resume = locomo_resume, #endif .driver = { .name = "locomo", }, }; /* * Get the parent device driver (us) structure * from a child function device */ static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) { return (struct locomo *)dev_get_drvdata(ldev->dev.parent); } void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = locomo_readl(lchip->base + LOCOMO_GPD); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPD); r = locomo_readl(lchip->base + LOCOMO_GPE); if (dir) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPE); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_set_dir); int locomo_gpio_read_level(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPL); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_level); int locomo_gpio_read_output(struct device *dev, unsigned int bits) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int ret; if (!lchip) return -ENODEV; spin_lock_irqsave(&lchip->lock, flags); ret = locomo_readl(lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); ret &= bits; return ret; } EXPORT_SYMBOL(locomo_gpio_read_output); void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set) { struct locomo *lchip = dev_get_drvdata(dev); unsigned long flags; unsigned int r; if (!lchip) return; spin_lock_irqsave(&lchip->lock, flags); r = 
locomo_readl(lchip->base + LOCOMO_GPO); if (set) r |= bits; else r &= ~bits; locomo_writel(r, lchip->base + LOCOMO_GPO); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_gpio_write); static void locomo_m62332_sendbit(void *mapbase, int bit) { unsigned int r; r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ if (bit & 1) { r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ } else { r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ } udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ } void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel) { struct locomo *lchip = locomo_chip_driver(ldev); int i; unsigned char data; unsigned int r; void *mapbase = lchip->base; unsigned long flags; spin_lock_irqsave(&lchip->lock, flags); /* Start */ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_START_HOLD_TIME); /* 5.0 usec */ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */ /* Send slave address and W 
bit (LSB is W bit) */ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n"); goto out; } /* Send Sub address (LSB is channel select) */ /* channel = 0 : ch1 select */ /* = 1 : ch2 select */ data = M62332_SUB_ADDR + channel; for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n"); goto out; } /* Send DAC data */ for (i = 1; i <= 8; i++) { locomo_m62332_sendbit(mapbase, dac_data >> (8 - i)); } /* Check A bit */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + 
LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SDAOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n"); } out: /* stop */ r = locomo_readl(mapbase + LOCOMO_DAC); r &= ~(LOCOMO_DAC_SCLOEB); locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */ r = locomo_readl(mapbase + LOCOMO_DAC); r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; locomo_writel(r, mapbase + LOCOMO_DAC); udelay(DAC_LOW_SETUP_TIME); /* 1000 nsec */ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */ spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_m62332_senddata); /* * Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf) { unsigned long flags; struct locomo *lchip = locomo_chip_driver(dev); if (vr) locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1); else locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0); spin_lock_irqsave(&lchip->lock, flags); locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); udelay(100); locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD); 
locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); spin_unlock_irqrestore(&lchip->lock, flags); } EXPORT_SYMBOL(locomo_frontlight_set); /* * LoCoMo "Register Access Bus." * * We model this as a regular bus type, and hang devices directly * off this. */ static int locomo_match(struct device *_dev, struct device_driver *_drv) { struct locomo_dev *dev = LOCOMO_DEV(_dev); struct locomo_driver *drv = LOCOMO_DRV(_drv); return dev->devid == drv->devid; } static int locomo_bus_suspend(struct device *dev, pm_message_t state) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->suspend) ret = drv->suspend(ldev, state); return ret; } static int locomo_bus_resume(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv && drv->resume) ret = drv->resume(ldev); return ret; } static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = -ENODEV; if (drv->probe) ret = drv->probe(ldev); return ret; } static int locomo_bus_remove(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); struct locomo_driver *drv = LOCOMO_DRV(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(ldev); return ret; } struct bus_type locomo_bus_type = { .name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, .suspend = locomo_bus_suspend, .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) { driver->drv.bus = &locomo_bus_type; return driver_register(&driver->drv); } EXPORT_SYMBOL(locomo_driver_register); void locomo_driver_unregister(struct locomo_driver *driver) { driver_unregister(&driver->drv); } EXPORT_SYMBOL(locomo_driver_unregister); static int __init locomo_init(void) { int ret = bus_register(&locomo_bus_type); if (ret == 
0) platform_driver_register(&locomo_device_driver); return ret; } static void __exit locomo_exit(void) { platform_driver_unregister(&locomo_device_driver); bus_unregister(&locomo_bus_type); } module_init(locomo_init); module_exit(locomo_exit); MODULE_DESCRIPTION("Sharp LoCoMo core driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
gpl-2.0
mdxy2010/forlinux-ok6410
kernel/drivers/net/cxgb3/aq100x.c
14066
8938
/* * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "common.h" #include "regs.h" enum { /* MDIO_DEV_PMA_PMD registers */ AQ_LINK_STAT = 0xe800, AQ_IMASK_PMA = 0xf000, /* MDIO_DEV_XGXS registers */ AQ_XAUI_RX_CFG = 0xc400, AQ_XAUI_TX_CFG = 0xe400, /* MDIO_DEV_ANEG registers */ AQ_1G_CTRL = 0xc400, AQ_ANEG_STAT = 0xc800, /* MDIO_DEV_VEND1 registers */ AQ_FW_VERSION = 0x0020, AQ_IFLAG_GLOBAL = 0xfc00, AQ_IMASK_GLOBAL = 0xff00, }; enum { IMASK_PMA = 1 << 2, IMASK_GLOBAL = 1 << 15, ADV_1G_FULL = 1 << 15, ADV_1G_HALF = 1 << 14, ADV_10G_FULL = 1 << 12, AQ_RESET = (1 << 14) | (1 << 15), AQ_LOWPOWER = 1 << 12, }; static int aq100x_reset(struct cphy *phy, int wait) { /* * Ignore the caller specified wait time; always wait for the reset to * complete. Can take up to 3s. */ int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000); if (err) CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n", phy->mdio.prtad, err); return err; } static int aq100x_intr_enable(struct cphy *phy) { int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA); if (err) return err; err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL); return err; } static int aq100x_intr_disable(struct cphy *phy) { return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0); } static int aq100x_intr_clear(struct cphy *phy) { unsigned int v; t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v); t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); return 0; } static int aq100x_intr_handler(struct cphy *phy) { int err; unsigned int cause, v; err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause); if (err) return err; /* Read (and reset) the latching version of the status */ t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); return cphy_cause_link_change; } static int aq100x_power_down(struct cphy *phy, int off) { return mdio_set_flag(&phy->mdio, phy->mdio.prtad, MDIO_MMD_PMAPMD, MDIO_CTRL1, MDIO_CTRL1_LPOWER, off); } static int aq100x_autoneg_enable(struct cphy *phy) { int err; err = aq100x_power_down(phy, 0); if (!err) 
err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, MDIO_MMD_AN, MDIO_CTRL1, BMCR_ANENABLE | BMCR_ANRESTART, 1); return err; } static int aq100x_autoneg_restart(struct cphy *phy) { int err; err = aq100x_power_down(phy, 0); if (!err) err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, MDIO_MMD_AN, MDIO_CTRL1, BMCR_ANENABLE | BMCR_ANRESTART, 1); return err; } static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map) { unsigned int adv; int err; /* 10G advertisement */ adv = 0; if (advertise_map & ADVERTISED_10000baseT_Full) adv |= ADV_10G_FULL; err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, ADV_10G_FULL, adv); if (err) return err; /* 1G advertisement */ adv = 0; if (advertise_map & ADVERTISED_1000baseT_Full) adv |= ADV_1G_FULL; if (advertise_map & ADVERTISED_1000baseT_Half) adv |= ADV_1G_HALF; err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL, ADV_1G_FULL | ADV_1G_HALF, adv); if (err) return err; /* 100M, pause advertisement */ adv = 0; if (advertise_map & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; if (advertise_map & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (advertise_map & ADVERTISED_Pause) adv |= ADVERTISE_PAUSE_CAP; if (advertise_map & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE, 0xfe0, adv); return err; } static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable) { return mdio_set_flag(&phy->mdio, phy->mdio.prtad, MDIO_MMD_PMAPMD, MDIO_CTRL1, BMCR_LOOPBACK, enable); } static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex) { /* no can do */ return -1; } static int aq100x_get_link_status(struct cphy *phy, int *link_ok, int *speed, int *duplex, int *fc) { int err; unsigned int v; if (link_ok) { err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v); if (err) return err; *link_ok = v & 1; if (!*link_ok) return 0; } err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v); if (err) return err; if 
(speed) { switch (v & 0x6) { case 0x6: *speed = SPEED_10000; break; case 0x4: *speed = SPEED_1000; break; case 0x2: *speed = SPEED_100; break; case 0x0: *speed = SPEED_10; break; } } if (duplex) *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF; return 0; } static struct cphy_ops aq100x_ops = { .reset = aq100x_reset, .intr_enable = aq100x_intr_enable, .intr_disable = aq100x_intr_disable, .intr_clear = aq100x_intr_clear, .intr_handler = aq100x_intr_handler, .autoneg_enable = aq100x_autoneg_enable, .autoneg_restart = aq100x_autoneg_restart, .advertise = aq100x_advertise, .set_loopback = aq100x_set_loopback, .set_speed_duplex = aq100x_set_speed_duplex, .get_link_status = aq100x_get_link_status, .power_down = aq100x_power_down, .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, }; int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, const struct mdio_ops *mdio_ops) { unsigned int v, v2, gpio, wait; int err; cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops, SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI, "1000/10GBASE-T"); /* * The PHY has been out of reset ever since the system powered up. So * we do a hard reset over here. */ gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL; t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0); msleep(1); t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio); /* * Give it enough time to load the firmware and get ready for mdio. 
*/ msleep(1000); wait = 500; /* in 10ms increments */ do { err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); if (err || v == 0xffff) { /* Allow prep_adapter to succeed when ffff is read */ CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n", phy_addr, err, v); goto done; } v &= AQ_RESET; if (v) msleep(10); } while (v && --wait); if (v) { CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n", phy_addr, v); goto done; /* let prep_adapter succeed */ } /* Datasheet says 3s max but this has been observed */ wait = (500 - wait) * 10 + 1000; if (wait > 3000) CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait); /* Firmware version check. */ t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v); if (v != 101) CH_WARN(adapter, "PHY%d: unsupported firmware %d\n", phy_addr, v); /* * The PHY should start in really-low-power mode. Prepare it for normal * operations. */ err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); if (err) return err; if (v & AQ_LOWPOWER) { err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1, AQ_LOWPOWER, 0); if (err) return err; msleep(10); } else CH_WARN(adapter, "PHY%d does not start in low power mode.\n", phy_addr); /* * Verify XAUI settings, but let prep succeed no matter what. */ v = v2 = 0; t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v); t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2); if (v != 0x1b || v2 != 0x1b) CH_WARN(adapter, "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n", phy_addr, v, v2); done: return err; }
gpl-2.0
exynos-reference/kernel
drivers/usb/serial/zte_ev.c
243
8813
/* * ZTE_EV USB serial driver * * Copyright (C) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org> * Copyright (C) 2012 Linux Foundation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver is based on code found in a ZTE_ENV patch that modified * the usb-serial generic driver. Comments were left in that I think * show the commands used to talk to the device, but I am not sure. */ #include <linux/kernel.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/uaccess.h> #define MAX_SETUP_DATA_SIZE 32 static void debug_data(struct device *dev, const char *function, int len, const unsigned char *data, int result) { dev_dbg(dev, "result = %d\n", result); if (result == len) dev_dbg(dev, "%s - length = %d, data = %*ph\n", function, len, len, data); } static int zte_ev_usb_serial_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_device *udev = port->serial->dev; struct device *dev = &port->dev; int result = 0; int len; unsigned char *buf; buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; /* send 1st ctl cmd(CTL 21 22 01 00 00 00 00 00) */ len = 0; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0001, 0x0000, NULL, len, USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 2st cmd and receive data */ /* * 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 25.1.0(5) * 16.0 DI 00 96 00 00 00 00 08 */ len = 0x0007; result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 3rd cmd */ /* * 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 30.1.0 * 16.0 DO 80 25 00 00 00 00 08 .%..... 
30.2.0 */ len = 0x0007; buf[0] = 0x80; buf[1] = 0x25; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x00; buf[5] = 0x00; buf[6] = 0x08; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 4th cmd */ /* * 16.0 CTL 21 22 03 00 00 00 00 00 */ len = 0; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 5th cmd */ /* * 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 33.1.0 * 16.0 DI 80 25 00 00 00 00 08 */ len = 0x0007; result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 6th cmd */ /* * 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 34.1.0 * 16.0 DO 80 25 00 00 00 00 08 */ len = 0x0007; buf[0] = 0x80; buf[1] = 0x25; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x00; buf[5] = 0x00; buf[6] = 0x08; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); kfree(buf); return usb_serial_generic_open(tty, port); } /* * CTL 21 22 02 00 00 00 00 00 CLASS 338.1.0 * * 16.1 DI a1 20 00 00 00 00 02 00 02 00 . ........ 340.1.0 * 16.0 CTL 21 22 03 00 00 00 00 00 CLASS 341.1.0 * * 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 346.1.0(3) * 16.0 DI 00 08 07 00 00 00 08 ....... 346.2.0 * * 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 349.1.0 * 16.0 DO 00 c2 01 00 00 00 08 ....... 349.2.0 * * 16.0 CTL 21 22 03 00 00 00 00 00 CLASS 350.1.0(2) * * 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 352.1.0 * 16.0 DI 00 c2 01 00 00 00 08 ....... 352.2.0 * * 16.1 DI a1 20 00 00 00 00 02 00 02 00 . ........ 353.1.0 * * 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 354.1.0 * 16.0 DO 00 c2 01 00 00 00 08 ....... 
354.2.0 * * 16.0 CTL 21 22 03 00 00 00 00 00 */ static void zte_ev_usb_serial_close(struct usb_serial_port *port) { struct usb_device *udev = port->serial->dev; struct device *dev = &port->dev; int result = 0; int len; unsigned char *buf; buf = kmalloc(MAX_SETUP_DATA_SIZE, GFP_KERNEL); if (!buf) return; /* send 1st ctl cmd(CTL 21 22 02 00 00 00 00 00) */ len = 0; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0002, 0x0000, NULL, len, USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 2st ctl cmd(CTL 21 22 03 00 00 00 00 00 ) */ len = 0; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 3st cmd and recieve data */ /* * 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 25.1.0(5) * 16.0 DI 00 08 07 00 00 00 08 */ len = 0x0007; result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 4th cmd */ /* * 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 30.1.0 * 16.0 DO 00 c2 01 00 00 00 08 .%..... 
30.2.0 */ len = 0x0007; buf[0] = 0x00; buf[1] = 0xc2; buf[2] = 0x01; buf[3] = 0x00; buf[4] = 0x00; buf[5] = 0x00; buf[6] = 0x08; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 5th cmd */ /* * 16.0 CTL 21 22 03 00 00 00 00 00 */ len = 0; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); /* send 6th cmd */ /* * 16.0 CTL a1 21 00 00 00 00 07 00 CLASS 33.1.0 * 16.0 DI 00 c2 01 00 00 00 08 */ len = 0x0007; result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x21, 0xa1, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 7th cmd */ /* * 16.0 CTL 21 20 00 00 00 00 07 00 CLASS 354.1.0 * 16.0 DO 00 c2 01 00 00 00 08 ....... 354.2.0 */ len = 0x0007; buf[0] = 0x00; buf[1] = 0xc2; buf[2] = 0x01; buf[3] = 0x00; buf[4] = 0x00; buf[5] = 0x00; buf[6] = 0x08; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x20, 0x21, 0x0000, 0x0000, buf, len, USB_CTRL_GET_TIMEOUT); debug_data(dev, __func__, len, buf, result); /* send 8th cmd */ /* * 16.0 CTL 21 22 03 00 00 00 00 00 */ len = 0; result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x22, 0x21, 0x0003, 0x0000, NULL, len, USB_CTRL_GET_TIMEOUT); dev_dbg(dev, "result = %d\n", result); kfree(buf); usb_serial_generic_close(port); } static const struct usb_device_id id_table[] = { /* AC8710, AC8710T */ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) }, /* AC8700 */ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) }, /* MG880 */ { USB_DEVICE(0x19d2, 0xfffd) }, { USB_DEVICE(0x19d2, 0xfffc) }, { USB_DEVICE(0x19d2, 0xfffb) }, /* AC8710_V3 */ { USB_DEVICE(0x19d2, 0xfff6) }, { USB_DEVICE(0x19d2, 0xfff7) }, { USB_DEVICE(0x19d2, 0xfff8) }, { USB_DEVICE(0x19d2, 0xfff9) }, { USB_DEVICE(0x19d2, 0xffee) }, /* AC2716, MC2716 
*/ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) }, /* AD3812 */ { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) }, { USB_DEVICE(0x19d2, 0xffec) }, { USB_DEVICE(0x05C6, 0x3197) }, { USB_DEVICE(0x05C6, 0x6000) }, { USB_DEVICE(0x05C6, 0x9008) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver zio_device = { .driver = { .owner = THIS_MODULE, .name = "zte_ev", }, .id_table = id_table, .num_ports = 1, .open = zte_ev_usb_serial_open, .close = zte_ev_usb_serial_close, }; static struct usb_serial_driver * const serial_drivers[] = { &zio_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_LICENSE("GPL v2");
gpl-2.0
emonty/deb-vhd-util
tools/qemu-xen/tests/tcg/mips/mips64-dsp/absq_s_ob.c
243
1052
#include "io.h" int main(void) { long long rd, rt, result, dspcontrol; rt = 0x7F7F7F7F7F7F7F7F; result = 0x7F7F7F7F7F7F7F7F; __asm (".set mips64\n\t" "absq_s.ob %0 %1\n\t" : "=r"(rd) : "r"(rt) ); if (result != rd) { printf("absq_s.ob test 1 error\n"); return -1; } __asm ("rddsp %0\n\t" : "=r"(rd) ); rd >> 20; rd = rd & 0x1; if (rd != 0) { printf("absq_s.ob test 1 dspcontrol overflow flag error\n"); return -1; } rt = 0x80FFFFFFFFFFFFFF; result = 0x7F01010101010101; __asm ("absq_s.ob %0, %1\n\t" : "=r"(rd) : "r"(rt) ); if (result != rd) { printf("absq_s.ob test 2 error\n"); return -1; } __asm ("rddsp %0\n\t" : "=r"(rd) ); rd = rd >> 20; rd = rd & 0x1; if (rd != 1) { printf("absq_s.ob test 2 dspcontrol overflow flag error\n"); return -1; } return 0; }
gpl-2.0
atalax/linux
drivers/clk/samsung/clk-exynos-clkout.c
499
4159
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Tomasz Figa <t.figa@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Clock driver for Exynos clock output
 *
 * The Exynos PMU exposes a single CLKOUT pin driven by the PMU_DEBUG
 * register: a mux (parent select) plus a gate (active-low "disable" bit).
 * This driver models that as one composite clock and registers it as an
 * OF clock provider on the PMU node.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

/* Only one output clock ("clkout") is ever registered. */
#define EXYNOS_CLKOUT_NR_CLKS		1
/* Maximum number of "clkoutN" parent clocks looked up from the DT node. */
#define EXYNOS_CLKOUT_PARENTS		32

/* Offset of the PMU_DEBUG register inside the PMU register block. */
#define EXYNOS_PMU_DEBUG_REG		0xa00
/* Bit 0 disables the output (hence CLK_GATE_SET_TO_DISABLE below). */
#define EXYNOS_CLKOUT_DISABLE_SHIFT	0
/* Parent-select field starts at bit 8; width differs per SoC family. */
#define EXYNOS_CLKOUT_MUX_SHIFT		8
#define EXYNOS4_CLKOUT_MUX_MASK		0xf
#define EXYNOS5_CLKOUT_MUX_MASK		0x1f

/*
 * All state for the single clkout instance: the gate and mux share one
 * spinlock because both live in the same PMU_DEBUG register.
 */
struct exynos_clkout {
	struct clk_gate gate;
	struct clk_mux mux;
	spinlock_t slock;
	struct clk_onecell_data data;
	struct clk *clk_table[EXYNOS_CLKOUT_NR_CLKS];
	void __iomem *reg;
	u32 pmu_debug_save;	/* PMU_DEBUG contents saved across suspend */
};

/* Single global instance; CLK_OF_DECLARE init runs once per matching PMU. */
static struct exynos_clkout *clkout;

/* Save PMU_DEBUG on suspend; the PMU block may lose it in low power. */
static int exynos_clkout_suspend(void)
{
	clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);

	return 0;
}

/* Restore the saved PMU_DEBUG value on resume. */
static void exynos_clkout_resume(void)
{
	writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
}

static struct syscore_ops exynos_clkout_syscore_ops = {
	.suspend = exynos_clkout_suspend,
	.resume = exynos_clkout_resume,
};

/*
 * Common init: look up up to 32 "clkoutN" parents from @node, map the PMU
 * registers and register one composite mux+gate clock. @mux_mask selects
 * the per-SoC width of the parent-select field.
 *
 * On any failure everything acquired so far is unwound (labels below) and
 * a single error is logged; the function returns void because it is a
 * CLK_OF_DECLARE callback.
 */
static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
{
	const char *parent_names[EXYNOS_CLKOUT_PARENTS];
	struct clk *parents[EXYNOS_CLKOUT_PARENTS];
	int parent_count;
	int ret;
	int i;

	clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
	if (!clkout)
		return;

	spin_lock_init(&clkout->slock);

	/*
	 * Collect parent names; missing entries become "none" placeholders
	 * so mux indices still line up with the hardware select value.
	 * parent_count ends up as (highest present index + 1).
	 */
	parent_count = 0;
	for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i) {
		char name[] = "clkoutXX";

		snprintf(name, sizeof(name), "clkout%d", i);
		parents[i] = of_clk_get_by_name(node, name);
		if (IS_ERR(parents[i])) {
			parent_names[i] = "none";
			continue;
		}

		parent_names[i] = __clk_get_name(parents[i]);
		parent_count = i + 1;
	}

	if (!parent_count)
		goto free_clkout;

	clkout->reg = of_iomap(node, 0);
	if (!clkout->reg)
		goto clks_put;

	/* Gate: bit 0 of PMU_DEBUG, set-to-disable polarity. */
	clkout->gate.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
	clkout->gate.bit_idx = EXYNOS_CLKOUT_DISABLE_SHIFT;
	clkout->gate.flags = CLK_GATE_SET_TO_DISABLE;
	clkout->gate.lock = &clkout->slock;

	/* Mux: parent-select field in the same register, shared lock. */
	clkout->mux.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
	clkout->mux.mask = mux_mask;
	clkout->mux.shift = EXYNOS_CLKOUT_MUX_SHIFT;
	clkout->mux.lock = &clkout->slock;

	clkout->clk_table[0] = clk_register_composite(NULL, "clkout",
				parent_names, parent_count, &clkout->mux.hw,
				&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
				&clk_gate_ops, CLK_SET_RATE_PARENT
				| CLK_SET_RATE_NO_REPARENT);
	if (IS_ERR(clkout->clk_table[0]))
		goto err_unmap;

	clkout->data.clks = clkout->clk_table;
	clkout->data.clk_num = EXYNOS_CLKOUT_NR_CLKS;
	ret = of_clk_add_provider(node, of_clk_src_onecell_get, &clkout->data);
	if (ret)
		goto err_clk_unreg;

	register_syscore_ops(&exynos_clkout_syscore_ops);

	return;

err_clk_unreg:
	clk_unregister(clkout->clk_table[0]);
err_unmap:
	iounmap(clkout->reg);
clks_put:
	/* Put every parent that was successfully acquired above. */
	for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i)
		if (!IS_ERR(parents[i]))
			clk_put(parents[i]);
free_clkout:
	kfree(clkout);

	pr_err("%s: failed to register clkout clock\n", __func__);
}

/* Exynos3/4 family: 4-bit mux field. */
static void __init exynos4_clkout_init(struct device_node *node)
{
	exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
}
CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
		exynos4_clkout_init);
CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
		exynos4_clkout_init);
CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
		exynos4_clkout_init);
CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
		exynos4_clkout_init);

/* Exynos5 family: 5-bit mux field. */
static void __init exynos5_clkout_init(struct device_node *node)
{
	exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
}
CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
		exynos5_clkout_init);
CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
		exynos5_clkout_init);
CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
		exynos5_clkout_init);
gpl-2.0
willcharlton/linux
drivers/net/wireless/rt2x00/rt2x00usb.c
1011
22431
/* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com> <http://rt2x00.serialmonkey.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. */ /* Module: rt2x00usb Abstract: rt2x00 generic usb device routines. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/bug.h> #include "rt2x00.h" #include "rt2x00usb.h" /* * Interfacing with the HW. */ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, const u16 value, void *buffer, const u16 buffer_length, const int timeout) { struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); int status; unsigned int pipe = (requesttype == USB_VENDOR_REQUEST_IN) ? usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); unsigned long expire = jiffies + msecs_to_jiffies(timeout); if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return -ENODEV; do { status = usb_control_msg(usb_dev, pipe, request, requesttype, value, offset, buffer, buffer_length, timeout / 2); if (status >= 0) return 0; if (status == -ENODEV) { /* Device has disappeared. 
*/ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); break; } } while (time_before(jiffies, expire)); rt2x00_err(rt2x00dev, "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n", request, offset, status); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request); int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, const u16 buffer_length, const int timeout) { int status; BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex)); /* * Check for Cache availability. */ if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) { rt2x00_err(rt2x00dev, "CSR cache not available\n"); return -ENOMEM; } if (requesttype == USB_VENDOR_REQUEST_OUT) memcpy(rt2x00dev->csr.cache, buffer, buffer_length); status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype, offset, 0, rt2x00dev->csr.cache, buffer_length, timeout); if (!status && requesttype == USB_VENDOR_REQUEST_IN) memcpy(buffer, rt2x00dev->csr.cache, buffer_length); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock); int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, const u16 buffer_length) { int status = 0; unsigned char *tb; u16 off, len, bsize; mutex_lock(&rt2x00dev->csr_mutex); tb = (char *)buffer; off = offset; len = buffer_length; while (len && !status) { bsize = min_t(u16, CSR_CACHE_SIZE, len); status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request, requesttype, off, tb, bsize, REGISTER_TIMEOUT); tb += bsize; len -= bsize; off += bsize; } mutex_unlock(&rt2x00dev->csr_mutex); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, const struct rt2x00_field32 field, u32 *reg) { unsigned int i; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return -ENODEV; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { 
rt2x00usb_register_read_lock(rt2x00dev, offset, reg); if (!rt2x00_get_field32(*reg, field)) return 1; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n", offset, *reg); *reg = ~0; return 0; } EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read); struct rt2x00_async_read_data { __le32 reg; struct usb_ctrlrequest cr; struct rt2x00_dev *rt2x00dev; bool (*callback)(struct rt2x00_dev *, int, u32); }; static void rt2x00usb_register_read_async_cb(struct urb *urb) { struct rt2x00_async_read_data *rd = urb->context; if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) { if (usb_submit_urb(urb, GFP_ATOMIC) < 0) kfree(rd); } else kfree(rd); } void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev, const unsigned int offset, bool (*callback)(struct rt2x00_dev*, int, u32)) { struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct urb *urb; struct rt2x00_async_read_data *rd; rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (!rd) return; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree(rd); return; } rd->rt2x00dev = rt2x00dev; rd->callback = callback; rd->cr.bRequestType = USB_VENDOR_REQUEST_IN; rd->cr.bRequest = USB_MULTI_READ; rd->cr.wValue = 0; rd->cr.wIndex = cpu_to_le16(offset); rd->cr.wLength = cpu_to_le16(sizeof(u32)); usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0), (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg), rt2x00usb_register_read_async_cb, rd); if (usb_submit_urb(urb, GFP_ATOMIC) < 0) kfree(rd); usb_free_urb(urb); } EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async); /* * TX data handlers. */ static void rt2x00usb_work_txdone_entry(struct queue_entry *entry) { /* * If the transfer to hardware succeeded, it does not mean the * frame was send out correctly. It only means the frame * was successfully pushed to the hardware, we have no * way to determine the transmission status right now. 
* (Only indirectly by looking at the failed TX counters * in the register). */ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); else rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN); } static void rt2x00usb_work_txdone(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, txdone_work); struct data_queue *queue; struct queue_entry *entry; tx_queue_for_each(rt2x00dev, queue) { while (!rt2x00queue_empty(queue)) { entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) break; rt2x00usb_work_txdone_entry(entry); } } } static void rt2x00usb_interrupt_txdone(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return; /* * Check if the frame was correctly uploaded */ if (urb->status) set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); /* * Report the frame as DMA done */ rt2x00lib_dmadone(entry); if (rt2x00dev->ops->lib->tx_dma_done) rt2x00dev->ops->lib->tx_dma_done(entry); /* * Schedule the delayed work for reading the TX status * from the device. */ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) || !kfifo_is_empty(&rt2x00dev->txstatus_fifo)) queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); } static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb *entry_priv = entry->priv_data; u32 length; int status; if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) || test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) return false; /* * USB devices require certain padding at the end of each frame * and urb. Those paddings are not included in skbs. 
Pass entry * to the driver to determine what the overall length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); status = skb_padto(entry->skb, length); if (unlikely(status)) { /* TODO: report something more appropriate than IO_FAILED. */ rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n"); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); return false; } usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, length, rt2x00usb_interrupt_txdone, entry); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { if (status == -ENODEV) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); } return false; } /* * RX data handlers. */ static void rt2x00usb_work_rxdone(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, rxdone_work); struct queue_entry *entry; struct skb_frame_desc *skbdesc; u8 rxd[32]; while (!rt2x00queue_empty(rt2x00dev->rx)) { entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE); if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) break; /* * Fill in desc fields of the skb descriptor */ skbdesc = get_skb_frame_desc(entry->skb); skbdesc->desc = rxd; skbdesc->desc_len = entry->queue->desc_size; /* * Send the frame to rt2x00lib for further processing. */ rt2x00lib_rxdone(entry, GFP_KERNEL); } } static void rt2x00usb_interrupt_rxdone(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return; /* * Report the frame as DMA done */ rt2x00lib_dmadone(entry); /* * Check if the received data is simply too small * to be actually valid, or if the urb is signaling * a problem. 
*/ if (urb->actual_length < entry->queue->desc_size || urb->status) set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); /* * Schedule the delayed work for reading the RX status * from the device. */ queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); } static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb *entry_priv = entry->priv_data; int status; if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) return false; rt2x00lib_dmastart(entry); usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, entry->skb->len, rt2x00usb_interrupt_rxdone, entry); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { if (status == -ENODEV) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); } return false; } void rt2x00usb_kick_queue(struct data_queue *queue) { switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: if (!rt2x00queue_empty(queue)) rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL, rt2x00usb_kick_tx_entry); break; case QID_RX: if (!rt2x00queue_full(queue)) rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE, NULL, rt2x00usb_kick_rx_entry); break; default: break; } } EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue); static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv = entry->priv_data; struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return false; usb_kill_urb(entry_priv->urb); /* * Kill guardian urb (if required by driver). 
*/ if ((entry->queue->qid == QID_BEACON) && (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))) usb_kill_urb(bcn_priv->guardian_urb); return false; } void rt2x00usb_flush_queue(struct data_queue *queue, bool drop) { struct work_struct *completion; unsigned int i; if (drop) rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL, rt2x00usb_flush_entry); /* * Obtain the queue completion handler */ switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: completion = &queue->rt2x00dev->txdone_work; break; case QID_RX: completion = &queue->rt2x00dev->rxdone_work; break; default: return; } for (i = 0; i < 10; i++) { /* * Check if the driver is already done, otherwise we * have to sleep a little while to give the driver/hw * the oppurtunity to complete interrupt process itself. */ if (rt2x00queue_empty(queue)) break; /* * Schedule the completion handler manually, when this * worker function runs, it should cleanup the queue. */ queue_work(queue->rt2x00dev->workqueue, completion); /* * Wait for a little while to give the driver * the oppurtunity to recover itself. 
*/ msleep(10); } } EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue); static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue) { rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n", queue->qid); rt2x00queue_stop_queue(queue); rt2x00queue_flush_queue(queue, true); rt2x00queue_start_queue(queue); } static int rt2x00usb_dma_timeout(struct data_queue *queue) { struct queue_entry *entry; entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE); return rt2x00queue_dma_timeout(entry); } void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; tx_queue_for_each(rt2x00dev, queue) { if (!rt2x00queue_empty(queue)) { if (rt2x00usb_dma_timeout(queue)) rt2x00usb_watchdog_tx_dma(queue); } } } EXPORT_SYMBOL_GPL(rt2x00usb_watchdog); /* * Radio handlers */ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0, REGISTER_TIMEOUT); } EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); /* * Device initialization handlers. 
*/ void rt2x00usb_clear_entry(struct queue_entry *entry) { entry->flags = 0; if (entry->queue->qid == QID_RX) rt2x00usb_kick_rx_entry(entry, NULL); } EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); static void rt2x00usb_assign_endpoint(struct data_queue *queue, struct usb_endpoint_descriptor *ep_desc) { struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev); int pipe; queue->usb_endpoint = usb_endpoint_num(ep_desc); if (queue->qid == QID_RX) { pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint); queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0); } else { pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint); queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1); } if (!queue->usb_maxpacket) queue->usb_maxpacket = 1; } static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev) { struct usb_interface *intf = to_usb_interface(rt2x00dev->dev); struct usb_host_interface *intf_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_desc; struct data_queue *queue = rt2x00dev->tx; struct usb_endpoint_descriptor *tx_ep_desc = NULL; unsigned int i; /* * Walk through all available endpoints to search for "bulk in" * and "bulk out" endpoints. When we find such endpoints collect * the information we need from the descriptor and assign it * to the queue. */ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc)) { rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc); } else if (usb_endpoint_is_bulk_out(ep_desc) && (queue != queue_end(rt2x00dev))) { rt2x00usb_assign_endpoint(queue, ep_desc); queue = queue_next(queue); tx_ep_desc = ep_desc; } } /* * At least 1 endpoint for RX and 1 endpoint for TX must be available. */ if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) { rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n"); return -EPIPE; } /* * It might be possible not all queues have a dedicated endpoint. 
* Loop through all TX queues and copy the endpoint information * which we have gathered from already assigned endpoints. */ txall_queue_for_each(rt2x00dev, queue) { if (!queue->usb_endpoint) rt2x00usb_assign_endpoint(queue, tx_ep_desc); } return 0; } static int rt2x00usb_alloc_entries(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv; struct queue_entry_priv_usb_bcn *bcn_priv; unsigned int i; for (i = 0; i < queue->limit; i++) { entry_priv = queue->entries[i].priv_data; entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL); if (!entry_priv->urb) return -ENOMEM; } /* * If this is not the beacon queue or * no guardian byte was required for the beacon, * then we are done. */ if (queue->qid != QID_BEACON || !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)) return 0; for (i = 0; i < queue->limit; i++) { bcn_priv = queue->entries[i].priv_data; bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL); if (!bcn_priv->guardian_urb) return -ENOMEM; } return 0; } static void rt2x00usb_free_entries(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv; struct queue_entry_priv_usb_bcn *bcn_priv; unsigned int i; if (!queue->entries) return; for (i = 0; i < queue->limit; i++) { entry_priv = queue->entries[i].priv_data; usb_kill_urb(entry_priv->urb); usb_free_urb(entry_priv->urb); } /* * If this is not the beacon queue or * no guardian byte was required for the beacon, * then we are done. 
*/ if (queue->qid != QID_BEACON || !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)) return; for (i = 0; i < queue->limit; i++) { bcn_priv = queue->entries[i].priv_data; usb_kill_urb(bcn_priv->guardian_urb); usb_free_urb(bcn_priv->guardian_urb); } } int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; int status; /* * Find endpoints for each queue */ status = rt2x00usb_find_endpoints(rt2x00dev); if (status) goto exit; /* * Allocate DMA */ queue_for_each(rt2x00dev, queue) { status = rt2x00usb_alloc_entries(queue); if (status) goto exit; } return 0; exit: rt2x00usb_uninitialize(rt2x00dev); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_initialize); void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; queue_for_each(rt2x00dev, queue) rt2x00usb_free_entries(queue); } EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize); /* * USB driver handlers. */ static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev) { kfree(rt2x00dev->rf); rt2x00dev->rf = NULL; kfree(rt2x00dev->eeprom); rt2x00dev->eeprom = NULL; kfree(rt2x00dev->csr.cache); rt2x00dev->csr.cache = NULL; } static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev) { rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL); if (!rt2x00dev->csr.cache) goto exit; rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); if (!rt2x00dev->eeprom) goto exit; rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); if (!rt2x00dev->rf) goto exit; return 0; exit: rt2x00_probe_err("Failed to allocate registers\n"); rt2x00usb_free_reg(rt2x00dev); return -ENOMEM; } int rt2x00usb_probe(struct usb_interface *usb_intf, const struct rt2x00_ops *ops) { struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct ieee80211_hw *hw; struct rt2x00_dev *rt2x00dev; int retval; usb_dev = usb_get_dev(usb_dev); usb_reset_device(usb_dev); hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); 
retval = -ENOMEM; goto exit_put_device; } usb_set_intfdata(usb_intf, hw); rt2x00dev = hw->priv; rt2x00dev->dev = &usb_intf->dev; rt2x00dev->ops = ops; rt2x00dev->hw = hw; rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB); INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone); INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone); hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); retval = rt2x00usb_alloc_reg(rt2x00dev); if (retval) goto exit_free_device; retval = rt2x00lib_probe_dev(rt2x00dev); if (retval) goto exit_free_reg; return 0; exit_free_reg: rt2x00usb_free_reg(rt2x00dev); exit_free_device: ieee80211_free_hw(hw); exit_put_device: usb_put_dev(usb_dev); usb_set_intfdata(usb_intf, NULL); return retval; } EXPORT_SYMBOL_GPL(rt2x00usb_probe); void rt2x00usb_disconnect(struct usb_interface *usb_intf) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; /* * Free all allocated data. */ rt2x00lib_remove_dev(rt2x00dev); rt2x00usb_free_reg(rt2x00dev); ieee80211_free_hw(hw); /* * Free the USB device data. */ usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); } EXPORT_SYMBOL_GPL(rt2x00usb_disconnect); #ifdef CONFIG_PM int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_suspend(rt2x00dev, state); } EXPORT_SYMBOL_GPL(rt2x00usb_suspend); int rt2x00usb_resume(struct usb_interface *usb_intf) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_resume(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00usb_resume); #endif /* CONFIG_PM */ /* * rt2x00usb module information. */ MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2x00 usb library"); MODULE_LICENSE("GPL");
gpl-2.0
codefarmer-cyk/linux
arch/arm/mach-orion5x/irq.c
1011
1532
/* * arch/arm/mach-orion5x/irq.c * * Core IRQ functions for Marvell Orion System On Chip * * Maintainer: Tzachi Perelstein <tzachi@marvell.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/bridge-regs.h> #include <plat/orion-gpio.h> #include <plat/irq.h> #include <asm/exception.h> #include "common.h" static int __initdata gpio0_irqs[4] = { IRQ_ORION5X_GPIO_0_7, IRQ_ORION5X_GPIO_8_15, IRQ_ORION5X_GPIO_16_23, IRQ_ORION5X_GPIO_24_31, }; #ifdef CONFIG_MULTI_IRQ_HANDLER /* * Compiling with both non-DT and DT support enabled, will * break asm irq handler used by non-DT boards. Therefore, * we provide a C-style irq handler even for non-DT boards, * if MULTI_IRQ_HANDLER is set. */ asmlinkage void __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs) { u32 stat; stat = readl_relaxed(MAIN_IRQ_CAUSE); stat &= readl_relaxed(MAIN_IRQ_MASK); if (stat) { unsigned int hwirq = __fls(stat); handle_IRQ(hwirq, regs); return; } } #endif void __init orion5x_init_irq(void) { orion_irq_init(0, MAIN_IRQ_MASK); #ifdef CONFIG_MULTI_IRQ_HANDLER set_handle_irq(orion5x_legacy_handle_irq); #endif /* * Initialize gpiolib for GPIOs 0-31. */ orion_gpio_init(NULL, 0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START, gpio0_irqs); }
gpl-2.0
robertdolca/linux
arch/mips/fw/arc/misc.c
1523
1564
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Miscellaneous ARCS PROM routines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Thin wrappers around ARC firmware entry points (via the ARC_CALLn
 * macros).  The power-transition wrappers all disable the board cache
 * and local interrupts first so the PROM takes over a quiesced CPU,
 * and are marked __noreturn since control never comes back.
 */
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>

#include <asm/bcache.h>
#include <asm/fw/arc/types.h>
#include <asm/sgialib.h>
#include <asm/bootinfo.h>

/* Halt the machine via the PROM; never returns. */
VOID __noreturn
ArcHalt(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(halt);

	unreachable();
}

/* Power the machine down via the PROM; never returns. */
VOID __noreturn
ArcPowerDown(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(pdown);

	unreachable();
}

/* XXX is this a soft reset basically? XXX */
VOID __noreturn
ArcRestart(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(restart);

	unreachable();
}

/* Reboot the machine via the PROM; never returns. */
VOID __noreturn
ArcReboot(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(reboot);

	unreachable();
}

/* Drop into the PROM's interactive monitor; never returns. */
VOID __noreturn
ArcEnterInteractiveMode(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(imode);

	unreachable();
}

/* Persist the current PROM configuration; returns the PROM status code. */
LONG
ArcSaveConfiguration(VOID)
{
	return ARC_CALL0(cfg_save);
}

/* Fetch the PROM system identification record. */
struct linux_sysid *
ArcGetSystemId(VOID)
{
	return (struct linux_sysid *) ARC_CALL0(get_sysid);
}

/* Ask the PROM to flush all caches; boot-time only (__init). */
VOID __init
ArcFlushAllCaches(VOID)
{
	ARC_CALL0(cache_flush);
}

/* Query display status for the given PROM file handle; boot-time only. */
DISPLAY_STATUS * __init ArcGetDisplayStatus(ULONG FileID)
{
	return (DISPLAY_STATUS *) ARC_CALL1(GetDisplayStatus, FileID);
}
gpl-2.0
Jazz-823/semc-kernel-msm7x30
fs/exofs/namei.c
1523
7813
/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Directory name operations (create/lookup/link/rename/...) for exofs,
 * closely mirroring the ext2/minix namei code.
 */

#include "exofs.h"

/*
 * Link a freshly created non-directory inode into @dentry; on failure
 * the caller's link count reference is dropped and the inode released.
 */
static inline int exofs_add_nondir(struct dentry *dentry, struct inode *inode)
{
	int err = exofs_add_link(dentry, inode);
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	inode_dec_link_count(inode);
	iput(inode);
	return err;
}

/*
 * Look @dentry up in @dir.  A missing name is a successful negative
 * lookup (NULL inode handed to d_splice_alias), not an error.
 */
static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode;
	ino_t ino;

	if (dentry->d_name.len > EXOFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ino = exofs_inode_by_name(dir, dentry);
	inode = NULL;
	if (ino) {
		inode = exofs_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	return d_splice_alias(inode, dentry);
}

/* Create a regular file in @dir and wire up its file/address ops. */
static int exofs_create(struct inode *dir, struct dentry *dentry, int mode,
			struct nameidata *nd)
{
	struct inode *inode = exofs_new_inode(dir, mode);
	int err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
		mark_inode_dirty(inode);
		err = exofs_add_nondir(dentry, inode);
	}
	return err;
}

/* Create a device/FIFO/socket node after validating the device number. */
static int exofs_mknod(struct inode *dir, struct dentry *dentry, int mode,
		       dev_t rdev)
{
	struct inode *inode;
	int err;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	inode = exofs_new_inode(dir, mode);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
		mark_inode_dirty(inode);
		err = exofs_add_nondir(dentry, inode);
	}
	return err;
}

/*
 * Create a symlink.  Short targets are stored inline in the inode's
 * i_data ("fast symlink"); longer ones go through the page cache
 * ("slow symlink").  The stored length l includes the NUL terminator,
 * while i_size for fast symlinks does not (l-1).
 */
static int exofs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct super_block *sb = dir->i_sb;
	int err = -ENAMETOOLONG;
	unsigned l = strlen(symname)+1;
	struct inode *inode;
	struct exofs_i_info *oi;

	if (l > sb->s_blocksize)
		goto out;

	inode = exofs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out;

	oi = exofs_i(inode);
	if (l > sizeof(oi->i_data)) {
		/* slow symlink */
		inode->i_op = &exofs_symlink_inode_operations;
		inode->i_mapping->a_ops = &exofs_aops;
		memset(oi->i_data, 0, sizeof(oi->i_data));

		err = page_symlink(inode, symname, l);
		if (err)
			goto out_fail;
	} else {
		/* fast symlink */
		inode->i_op = &exofs_fast_symlink_inode_operations;
		memcpy(oi->i_data, symname, l);
		inode->i_size = l-1;
	}
	mark_inode_dirty(inode);

	err = exofs_add_nondir(dentry, inode);
out:
	return err;

out_fail:
	inode_dec_link_count(inode);
	iput(inode);
	goto out;
}

/* Hard-link @old_dentry's inode under a new name in @dir. */
static int exofs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;

	if (inode->i_nlink >= EXOFS_LINK_MAX)
		return -EMLINK;

	inode->i_ctime = CURRENT_TIME;
	inode_inc_link_count(inode);
	/* Extra inode reference for the new dentry we are about to attach. */
	atomic_inc(&inode->i_count);

	return exofs_add_nondir(dentry, inode);
}

/*
 * Create a directory.  The parent's link count is bumped up front for
 * the new "..", and the new directory gets a second link for its ".";
 * the unwind labels reverse these in the opposite order.
 */
static int exofs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct inode *inode;
	int err = -EMLINK;

	if (dir->i_nlink >= EXOFS_LINK_MAX)
		goto out;

	inode_inc_link_count(dir);

	inode = exofs_new_inode(dir, S_IFDIR | mode);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_dir;

	inode->i_op = &exofs_dir_inode_operations;
	inode->i_fop = &exofs_dir_operations;
	inode->i_mapping->a_ops = &exofs_aops;

	inode_inc_link_count(inode);

	err = exofs_make_empty(inode, dir);
	if (err)
		goto out_fail;

	err = exofs_add_link(dentry, inode);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
out:
	return err;

out_fail:
	/* Drop both links the new directory held (its name and "."). */
	inode_dec_link_count(inode);
	inode_dec_link_count(inode);
	iput(inode);
out_dir:
	inode_dec_link_count(dir);
	goto out;
}

/* Remove a non-directory entry and drop one link on its inode. */
static int exofs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct exofs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	de = exofs_find_entry(dir, dentry, &page);
	if (!de)
		goto out;

	err = exofs_delete_entry(de, page);
	if (err)
		goto out;

	inode->i_ctime = dir->i_ctime;
	inode_dec_link_count(inode);
	err = 0;
out:
	return err;
}

/*
 * Remove an empty directory: unlink it, then drop the extra links held
 * for "." (on the child) and ".." (on the parent).
 */
static int exofs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = -ENOTEMPTY;

	if (exofs_empty_dir(inode)) {
		err = exofs_unlink(dir, dentry);
		if (!err) {
			inode->i_size = 0;
			inode_dec_link_count(inode);
			inode_dec_link_count(dir);
		}
	}
	return err;
}

/*
 * Rename, ext2-style.  Two main cases: the target name already exists
 * (overwrite its directory entry in place) or it does not (add a fresh
 * link).  For directories the "../" entry must also be repointed at the
 * new parent, with the corresponding link-count transfer between the
 * old and new parent directories.
 */
static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *dir_page = NULL;
	struct exofs_dir_entry *dir_de = NULL;
	struct page *old_page;
	struct exofs_dir_entry *old_de;
	int err = -ENOENT;

	old_de = exofs_find_entry(old_dir, old_dentry, &old_page);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = exofs_dotdot(old_inode, &dir_page);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page *new_page;
		struct exofs_dir_entry *new_de;

		/* A directory may only replace an empty directory. */
		err = -ENOTEMPTY;
		if (dir_de && !exofs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
		if (!new_de)
			goto out_dir;
		inode_inc_link_count(old_inode);
		err = exofs_set_link(new_dir, new_de, new_page, old_inode);
		new_inode->i_ctime = CURRENT_TIME;
		if (dir_de)
			drop_nlink(new_inode);
		inode_dec_link_count(new_inode);
		if (err)
			goto out_dir;
	} else {
		if (dir_de) {
			err = -EMLINK;
			if (new_dir->i_nlink >= EXOFS_LINK_MAX)
				goto out_dir;
		}
		inode_inc_link_count(old_inode);
		err = exofs_add_link(new_dentry, old_inode);
		if (err) {
			inode_dec_link_count(old_inode);
			goto out_dir;
		}
		if (dir_de)
			inode_inc_link_count(new_dir);
	}

	old_inode->i_ctime = CURRENT_TIME;

	exofs_delete_entry(old_de, old_page);
	inode_dec_link_count(old_inode);

	if (dir_de) {
		/* Repoint the moved directory's ".." at the new parent. */
		err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
		inode_dec_link_count(old_dir);
		if (err)
			goto out_dir;
	}
	return 0;

out_dir:
	if (dir_de) {
		kunmap(dir_page);
		page_cache_release(dir_page);
	}
out_old:
	kunmap(old_page);
	page_cache_release(old_page);
out:
	return err;
}

const struct inode_operations exofs_dir_inode_operations = {
	.create		= exofs_create,
	.lookup		= exofs_lookup,
	.link		= exofs_link,
	.unlink		= exofs_unlink,
	.symlink	= exofs_symlink,
	.mkdir		= exofs_mkdir,
	.rmdir		= exofs_rmdir,
	.mknod		= exofs_mknod,
	.rename		= exofs_rename,
	.setattr	= exofs_setattr,
};

const struct inode_operations exofs_special_inode_operations = {
	.setattr	= exofs_setattr,
};
gpl-2.0
koxda/android_kernel_samsung_msm8660-common
arch/arm/plat-s5p/s5p-time.c
2291
9365
/* linux/arch/arm/plat-s5p/s5p-time.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5P - Common hr-timer support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/platform_device.h> #include <asm/smp_twd.h> #include <asm/mach/time.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/sched_clock.h> #include <mach/map.h> #include <plat/devs.h> #include <plat/regs-timer.h> #include <plat/s5p-time.h> static struct clk *tin_event; static struct clk *tin_source; static struct clk *tdiv_event; static struct clk *tdiv_source; static struct clk *timerclk; static struct s5p_timer_source timer_source; static unsigned long clock_count_per_tick; static void s5p_timer_resume(void); static void s5p_time_stop(enum s5p_timer_mode mode) { unsigned long tcon; tcon = __raw_readl(S3C2410_TCON); switch (mode) { case S5P_PWM0: tcon &= ~S3C2410_TCON_T0START; break; case S5P_PWM1: tcon &= ~S3C2410_TCON_T1START; break; case S5P_PWM2: tcon &= ~S3C2410_TCON_T2START; break; case S5P_PWM3: tcon &= ~S3C2410_TCON_T3START; break; case S5P_PWM4: tcon &= ~S3C2410_TCON_T4START; break; default: printk(KERN_ERR "Invalid Timer %d\n", mode); break; } __raw_writel(tcon, S3C2410_TCON); } static void s5p_time_setup(enum s5p_timer_mode mode, unsigned long tcnt) { unsigned long tcon; tcon = __raw_readl(S3C2410_TCON); tcnt--; switch (mode) { case S5P_PWM0: tcon &= ~(0x0f << 0); tcon |= S3C2410_TCON_T0MANUALUPD; break; case S5P_PWM1: tcon &= ~(0x0f << 8); tcon |= S3C2410_TCON_T1MANUALUPD; break; case S5P_PWM2: tcon &= ~(0x0f << 12); tcon |= S3C2410_TCON_T2MANUALUPD; break; case S5P_PWM3: tcon &= ~(0x0f << 16); tcon |= S3C2410_TCON_T3MANUALUPD; break; case 
S5P_PWM4: tcon &= ~(0x07 << 20); tcon |= S3C2410_TCON_T4MANUALUPD; break; default: printk(KERN_ERR "Invalid Timer %d\n", mode); break; } __raw_writel(tcnt, S3C2410_TCNTB(mode)); __raw_writel(tcnt, S3C2410_TCMPB(mode)); __raw_writel(tcon, S3C2410_TCON); } static void s5p_time_start(enum s5p_timer_mode mode, bool periodic) { unsigned long tcon; tcon = __raw_readl(S3C2410_TCON); switch (mode) { case S5P_PWM0: tcon |= S3C2410_TCON_T0START; tcon &= ~S3C2410_TCON_T0MANUALUPD; if (periodic) tcon |= S3C2410_TCON_T0RELOAD; else tcon &= ~S3C2410_TCON_T0RELOAD; break; case S5P_PWM1: tcon |= S3C2410_TCON_T1START; tcon &= ~S3C2410_TCON_T1MANUALUPD; if (periodic) tcon |= S3C2410_TCON_T1RELOAD; else tcon &= ~S3C2410_TCON_T1RELOAD; break; case S5P_PWM2: tcon |= S3C2410_TCON_T2START; tcon &= ~S3C2410_TCON_T2MANUALUPD; if (periodic) tcon |= S3C2410_TCON_T2RELOAD; else tcon &= ~S3C2410_TCON_T2RELOAD; break; case S5P_PWM3: tcon |= S3C2410_TCON_T3START; tcon &= ~S3C2410_TCON_T3MANUALUPD; if (periodic) tcon |= S3C2410_TCON_T3RELOAD; else tcon &= ~S3C2410_TCON_T3RELOAD; break; case S5P_PWM4: tcon |= S3C2410_TCON_T4START; tcon &= ~S3C2410_TCON_T4MANUALUPD; if (periodic) tcon |= S3C2410_TCON_T4RELOAD; else tcon &= ~S3C2410_TCON_T4RELOAD; break; default: printk(KERN_ERR "Invalid Timer %d\n", mode); break; } __raw_writel(tcon, S3C2410_TCON); } static int s5p_set_next_event(unsigned long cycles, struct clock_event_device *evt) { s5p_time_setup(timer_source.event_id, cycles); s5p_time_start(timer_source.event_id, NON_PERIODIC); return 0; } static void s5p_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { s5p_time_stop(timer_source.event_id); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: s5p_time_setup(timer_source.event_id, clock_count_per_tick); s5p_time_start(timer_source.event_id, PERIODIC); break; case CLOCK_EVT_MODE_ONESHOT: break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: break; case CLOCK_EVT_MODE_RESUME: s5p_timer_resume(); break; } } static void 
s5p_timer_resume(void) { /* event timer restart */ s5p_time_setup(timer_source.event_id, clock_count_per_tick); s5p_time_start(timer_source.event_id, PERIODIC); /* source timer restart */ s5p_time_setup(timer_source.source_id, TCNT_MAX); s5p_time_start(timer_source.source_id, PERIODIC); } void __init s5p_set_timer_source(enum s5p_timer_mode event, enum s5p_timer_mode source) { s3c_device_timer[event].dev.bus = &platform_bus_type; s3c_device_timer[source].dev.bus = &platform_bus_type; timer_source.event_id = event; timer_source.source_id = source; } static struct clock_event_device time_event_device = { .name = "s5p_event_timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .rating = 200, .set_next_event = s5p_set_next_event, .set_mode = s5p_set_mode, }; static irqreturn_t s5p_clock_event_isr(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction s5p_clock_event_irq = { .name = "s5p_time_irq", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = s5p_clock_event_isr, .dev_id = &time_event_device, }; static void __init s5p_clockevent_init(void) { unsigned long pclk; unsigned long clock_rate; unsigned int irq_number; struct clk *tscaler; pclk = clk_get_rate(timerclk); tscaler = clk_get_parent(tdiv_event); clk_set_rate(tscaler, pclk / 2); clk_set_rate(tdiv_event, pclk / 2); clk_set_parent(tin_event, tdiv_event); clock_rate = clk_get_rate(tin_event); clock_count_per_tick = clock_rate / HZ; clockevents_calc_mult_shift(&time_event_device, clock_rate, S5PTIMER_MIN_RANGE); time_event_device.max_delta_ns = clockevent_delta2ns(-1, &time_event_device); time_event_device.min_delta_ns = clockevent_delta2ns(1, &time_event_device); time_event_device.cpumask = cpumask_of(0); clockevents_register_device(&time_event_device); irq_number = timer_source.event_id + IRQ_TIMER0; setup_irq(irq_number, &s5p_clock_event_irq); } static void __iomem *s5p_timer_reg(void) { unsigned long 
offset = 0; switch (timer_source.source_id) { case S5P_PWM0: case S5P_PWM1: case S5P_PWM2: case S5P_PWM3: offset = (timer_source.source_id * 0x0c) + 0x14; break; case S5P_PWM4: offset = 0x40; break; default: printk(KERN_ERR "Invalid Timer %d\n", timer_source.source_id); return NULL; } return S3C_TIMERREG(offset); } static cycle_t s5p_timer_read(struct clocksource *cs) { void __iomem *reg = s5p_timer_reg(); return (cycle_t) (reg ? ~__raw_readl(reg) : 0); } /* * Override the global weak sched_clock symbol with this * local implementation which uses the clocksource to get some * better resolution when scheduling the kernel. We accept that * this wraps around for now, since it is just a relative time * stamp. (Inspired by U300 implementation.) */ static DEFINE_CLOCK_DATA(cd); unsigned long long notrace sched_clock(void) { void __iomem *reg = s5p_timer_reg(); if (!reg) return 0; return cyc_to_sched_clock(&cd, ~__raw_readl(reg), (u32)~0); } static void notrace s5p_update_sched_clock(void) { void __iomem *reg = s5p_timer_reg(); if (!reg) return; update_sched_clock(&cd, ~__raw_readl(reg), (u32)~0); } struct clocksource time_clocksource = { .name = "s5p_clocksource_timer", .rating = 250, .read = s5p_timer_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void __init s5p_clocksource_init(void) { unsigned long pclk; unsigned long clock_rate; pclk = clk_get_rate(timerclk); clk_set_rate(tdiv_source, pclk / 2); clk_set_parent(tin_source, tdiv_source); clock_rate = clk_get_rate(tin_source); s5p_time_setup(timer_source.source_id, TCNT_MAX); s5p_time_start(timer_source.source_id, PERIODIC); init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate); if (clocksource_register_hz(&time_clocksource, clock_rate)) panic("%s: can't register clocksource\n", time_clocksource.name); } static void __init s5p_timer_resources(void) { unsigned long event_id = timer_source.event_id; unsigned long source_id = timer_source.source_id; timerclk = clk_get(NULL, 
"timers"); if (IS_ERR(timerclk)) panic("failed to get timers clock for timer"); clk_enable(timerclk); tin_event = clk_get(&s3c_device_timer[event_id].dev, "pwm-tin"); if (IS_ERR(tin_event)) panic("failed to get pwm-tin clock for event timer"); tdiv_event = clk_get(&s3c_device_timer[event_id].dev, "pwm-tdiv"); if (IS_ERR(tdiv_event)) panic("failed to get pwm-tdiv clock for event timer"); clk_enable(tin_event); tin_source = clk_get(&s3c_device_timer[source_id].dev, "pwm-tin"); if (IS_ERR(tin_source)) panic("failed to get pwm-tin clock for source timer"); tdiv_source = clk_get(&s3c_device_timer[source_id].dev, "pwm-tdiv"); if (IS_ERR(tdiv_source)) panic("failed to get pwm-tdiv clock for source timer"); clk_enable(tin_source); } static void __init s5p_timer_init(void) { s5p_timer_resources(); s5p_clockevent_init(); s5p_clocksource_init(); } struct sys_timer s5p_timer = { .init = s5p_timer_init, };
gpl-2.0
utilite-computer/linux-kernel-3.0
sound/pci/echoaudio/indigodj.c
3571
2921
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO_DJ #define ECHOCARD_NAME "Indigo DJ" #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 0 */ #define PX_DIGITAL_IN 8 /* 0 */ #define PX_NUM 8 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 4 */ #define BX_DIGITAL_OUT 4 /* 0 */ #define BX_ANALOG_IN 4 /* 0 */ #define BX_DIGITAL_IN 4 /* 0 */ #define BX_NUM 4 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <asm/io.h> #include <asm/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_dj_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_DJ_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_dj_dsp.fw"} }; static 
DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = { {0x1057, 0x3410, 0xECC0, 0x00B0, 0, 0, 0}, /* Indigo DJ*/ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 4, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigodj_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
ZdrowyGosciu/kernel_lge_g2_msm8974-caf_lg-devs
arch/x86/kernel/sys_x86_64.c
4083
6847
#include <linux/errno.h> #include <linux/sched.h> #include <linux/syscalls.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/utsname.h> #include <linux/personality.h> #include <linux/random.h> #include <linux/uaccess.h> #include <linux/elf.h> #include <asm/ia32.h> #include <asm/syscalls.h> /* * Align a virtual address to avoid aliasing in the I$ on AMD F15h. * * @flags denotes the allocation direction - bottomup or topdown - * or vDSO; see call sites below. */ unsigned long align_addr(unsigned long addr, struct file *filp, enum align_flags flags) { unsigned long tmp_addr; /* handle 32- and 64-bit case with a single conditional */ if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32()))) return addr; if (!(current->flags & PF_RANDOMIZE)) return addr; if (!((flags & ALIGN_VDSO) || filp)) return addr; tmp_addr = addr; /* * We need an address which is <= than the original * one only when in topdown direction. 
*/ if (!(flags & ALIGN_TOPDOWN)) tmp_addr += va_align.mask; tmp_addr &= ~va_align.mask; return tmp_addr; } static int __init control_va_addr_alignment(char *str) { /* guard against enabling this on other CPU families */ if (va_align.flags < 0) return 1; if (*str == 0) return 1; if (*str == '=') str++; if (!strcmp(str, "32")) va_align.flags = ALIGN_VA_32; else if (!strcmp(str, "64")) va_align.flags = ALIGN_VA_64; else if (!strcmp(str, "off")) va_align.flags = 0; else if (!strcmp(str, "on")) va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; else return 0; return 1; } __setup("align_va_addr", control_va_addr_alignment); SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { long error; error = -EINVAL; if (off & ~PAGE_MASK) goto out; error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return error; } static void find_start_end(unsigned long flags, unsigned long *begin, unsigned long *end) { if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) { unsigned long new_begin; /* This is usually used needed to map code in small model, so it needs to be in the first 31bit. Limit it to that. This means we need to move the unmapped base down for this case. This can give conflicts with the heap, but we assume that glibc malloc knows how to fall back to mmap. Give it 1GB of playground for now. 
-AK */ *begin = 0x40000000; *end = 0x80000000; if (current->flags & PF_RANDOMIZE) { new_begin = randomize_range(*begin, *begin + 0x02000000, 0); if (new_begin) *begin = new_begin; } } else { *begin = TASK_UNMAPPED_BASE; *end = TASK_SIZE; } } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; unsigned long begin, end; if (flags & MAP_FIXED) return addr; find_start_end(flags, &begin, &end); if (len > end) return -ENOMEM; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32)) && len <= mm->cached_hole_size) { mm->cached_hole_size = 0; mm->free_area_cache = begin; } addr = mm->free_area_cache; if (addr < begin) addr = begin; start_addr = addr; full_search: addr = align_addr(addr, filp, 0); for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (end - len < addr) { /* * Start a new search - just in case we missed * some holes. 
*/ if (start_addr != begin) { start_addr = addr = begin; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } if (!vma || addr + len <= vma->vm_start) { /* * Remember the place where we stopped the search: */ mm->free_area_cache = addr + len; return addr; } if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; addr = vma->vm_end; addr = align_addr(addr, filp, 0); } } unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0, start_addr; /* requested length too big for entire address space */ if (len > TASK_SIZE) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* for MAP_32BIT mappings we force the legact mmap base */ if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) goto bottomup; /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } /* check if free_area_cache is useful for us */ if (len <= mm->cached_hole_size) { mm->cached_hole_size = 0; mm->free_area_cache = mm->mmap_base; } try_again: /* either no address requested or can't fit in requested address hole */ start_addr = addr = mm->free_area_cache; if (addr < len) goto fail; addr -= len; do { addr = align_addr(addr, filp, ALIGN_TOPDOWN); /* * Lookup failure means no vma is above this address, * else if new region fits below vma->vm_start, * return with success: */ vma = find_vma(mm, addr); if (!vma || addr+len <= vma->vm_start) /* remember the address as a hint for next time */ return mm->free_area_cache = addr; /* remember the largest hole we saw so far */ if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ addr = vma->vm_start-len; 
} while (len < vma->vm_start); fail: /* * if hint left us with no space for the requested * mapping then try again: */ if (start_addr != mm->mmap_base) { mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = 0; goto try_again; } bottomup: /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ mm->cached_hole_size = ~0UL; mm->free_area_cache = TASK_UNMAPPED_BASE; addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); /* * Restore the topdown base: */ mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; return addr; }
gpl-2.0
omnirom/android_kernel_asus_me301t
drivers/hwmon/ds1621.c
4083
9839
/* ds1621.c - Part of lm_sensors, Linux kernel modules for hardware monitoring Christian W. Zuckschwerdt <zany@triq.net> 2000-11-23 based on lm75.c by Frodo Looijaard <frodol@dds.nl> Ported to Linux 2.6 by Aurelien Jarno <aurelien@aurel32.net> with the help of Jean Delvare <khali@linux-fr.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include "lm75.h" /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* Insmod parameters */ static int polarity = -1; module_param(polarity, int, 0); MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low"); /* Many DS1621 constants specified below */ /* Config register used for detection */ /* 7 6 5 4 3 2 1 0 */ /* |Done|THF |TLF |NVB | X | X |POL |1SHOT| */ #define DS1621_REG_CONFIG_NVB 0x10 #define DS1621_REG_CONFIG_POLARITY 0x02 #define DS1621_REG_CONFIG_1SHOT 0x01 #define DS1621_REG_CONFIG_DONE 0x80 /* The DS1621 registers */ static const u8 DS1621_REG_TEMP[3] = { 0xAA, /* input, word, RO */ 0xA2, /* min, word, RW */ 0xA1, /* max, word, RW */ }; #define DS1621_REG_CONF 
0xAC /* byte, RW */ #define DS1621_COM_START 0xEE /* no data */ #define DS1621_COM_STOP 0x22 /* no data */ /* The DS1621 configuration register */ #define DS1621_ALARM_TEMP_HIGH 0x40 #define DS1621_ALARM_TEMP_LOW 0x20 /* Conversions */ #define ALARMS_FROM_REG(val) ((val) & \ (DS1621_ALARM_TEMP_HIGH | DS1621_ALARM_TEMP_LOW)) /* Each client has this additional data */ struct ds1621_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u16 temp[3]; /* Register values, word */ u8 conf; /* Register encoding, combined */ }; /* Temperature registers are word-sized. DS1621 uses a high-byte first convention, which is exactly opposite to the SMBus standard. */ static int ds1621_read_temp(struct i2c_client *client, u8 reg) { int ret; ret = i2c_smbus_read_word_data(client, reg); if (ret < 0) return ret; return swab16(ret); } static int ds1621_write_temp(struct i2c_client *client, u8 reg, u16 value) { return i2c_smbus_write_word_data(client, reg, swab16(value)); } static void ds1621_init_client(struct i2c_client *client) { u8 conf, new_conf; new_conf = conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF); /* switch to continuous conversion mode */ new_conf &= ~DS1621_REG_CONFIG_1SHOT; /* setup output polarity */ if (polarity == 0) new_conf &= ~DS1621_REG_CONFIG_POLARITY; else if (polarity == 1) new_conf |= DS1621_REG_CONFIG_POLARITY; if (conf != new_conf) i2c_smbus_write_byte_data(client, DS1621_REG_CONF, new_conf); /* start conversion */ i2c_smbus_write_byte(client, DS1621_COM_START); } static struct ds1621_data *ds1621_update_client(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct ds1621_data *data = i2c_get_clientdata(client); u8 new_conf; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { int i; dev_dbg(&client->dev, "Starting ds1621 update\n"); data->conf = 
i2c_smbus_read_byte_data(client, DS1621_REG_CONF); for (i = 0; i < ARRAY_SIZE(data->temp); i++) data->temp[i] = ds1621_read_temp(client, DS1621_REG_TEMP[i]); /* reset alarms if necessary */ new_conf = data->conf; if (data->temp[0] > data->temp[1]) /* input > min */ new_conf &= ~DS1621_ALARM_TEMP_LOW; if (data->temp[0] < data->temp[2]) /* input < max */ new_conf &= ~DS1621_ALARM_TEMP_HIGH; if (data->conf != new_conf) i2c_smbus_write_byte_data(client, DS1621_REG_CONF, new_conf); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static ssize_t show_temp(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ds1621_data *data = ds1621_update_client(dev); return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->temp[attr->index])); } static ssize_t set_temp(struct device *dev, struct device_attribute *da, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); struct ds1621_data *data = i2c_get_clientdata(client); u16 val = LM75_TEMP_TO_REG(simple_strtol(buf, NULL, 10)); mutex_lock(&data->update_lock); data->temp[attr->index] = val; ds1621_write_temp(client, DS1621_REG_TEMP[attr->index], data->temp[attr->index]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_alarms(struct device *dev, struct device_attribute *da, char *buf) { struct ds1621_data *data = ds1621_update_client(dev); return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->conf)); } static ssize_t show_alarm(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct ds1621_data *data = ds1621_update_client(dev); return sprintf(buf, "%d\n", !!(data->conf & attr->index)); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); static 
SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, 2); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, DS1621_ALARM_TEMP_LOW); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, DS1621_ALARM_TEMP_HIGH); static struct attribute *ds1621_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &dev_attr_alarms.attr, NULL }; static const struct attribute_group ds1621_group = { .attrs = ds1621_attributes, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int ds1621_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int conf, temp; int i; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_WRITE_BYTE)) return -ENODEV; /* Now, we do the remaining detection. It is lousy. */ /* The NVB bit should be low if no EEPROM write has been requested during the latest 10ms, which is highly improbable in our case. */ conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF); if (conf < 0 || conf & DS1621_REG_CONFIG_NVB) return -ENODEV; /* The 7 lowest bits of a temperature should always be 0. 
*/ for (i = 0; i < ARRAY_SIZE(DS1621_REG_TEMP); i++) { temp = i2c_smbus_read_word_data(client, DS1621_REG_TEMP[i]); if (temp < 0 || (temp & 0x7f00)) return -ENODEV; } strlcpy(info->type, "ds1621", I2C_NAME_SIZE); return 0; } static int ds1621_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ds1621_data *data; int err; data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Initialize the DS1621 chip */ ds1621_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &ds1621_group))) goto exit_free; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&client->dev.kobj, &ds1621_group); exit_free: kfree(data); exit: return err; } static int ds1621_remove(struct i2c_client *client) { struct ds1621_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &ds1621_group); kfree(data); return 0; } static const struct i2c_device_id ds1621_id[] = { { "ds1621", 0 }, { "ds1625", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ds1621_id); /* This is the driver that will be inserted */ static struct i2c_driver ds1621_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "ds1621", }, .probe = ds1621_probe, .remove = ds1621_remove, .id_table = ds1621_id, .detect = ds1621_detect, .address_list = normal_i2c, }; static int __init ds1621_init(void) { return i2c_add_driver(&ds1621_driver); } static void __exit ds1621_exit(void) { i2c_del_driver(&ds1621_driver); } MODULE_AUTHOR("Christian W. Zuckschwerdt <zany@triq.net>"); MODULE_DESCRIPTION("DS1621 driver"); MODULE_LICENSE("GPL"); module_init(ds1621_init); module_exit(ds1621_exit);
gpl-2.0
tbalden/android_kernel_htc_m7-sense4.3
drivers/mmc/host/sh_mobile_sdhi.c
4851
7404
/* * SuperH Mobile SDHI * * Copyright (C) 2009 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on "Compaq ASIC3 support": * * Copyright 2001 Compaq Computer Corporation. * Copyright 2004-2005 Phil Blundell * Copyright 2007-2008 OpenedHand Ltd. * * Authors: Phil Blundell <pb@handhelds.org>, * Samuel Ortiz <sameo@openedhand.com> * */ #include <linux/kernel.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mfd/tmio.h> #include <linux/sh_dma.h> #include <linux/delay.h> #include "tmio_mmc.h" struct sh_mobile_sdhi { struct clk *clk; struct tmio_mmc_data mmc_data; struct sh_dmae_slave param_tx; struct sh_dmae_slave param_rx; struct tmio_mmc_dma dma_priv; }; static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) { struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; if (p && p->set_pwr) p->set_pwr(pdev, state); } static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) { struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; if (p && p->get_cd) return p->get_cd(pdev); else return -ENOSYS; } static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) { int timeout = 1000; while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13))) udelay(1); if (!timeout) { dev_warn(host->pdata->dev, "timeout waiting for SD bus idle\n"); return -EBUSY; } return 0; } static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) { switch (addr) { case CTL_SD_CMD: case CTL_STOP_INTERNAL_ACTION: case CTL_XFER_BLK_COUNT: case CTL_SD_CARD_CLK_CTL: case CTL_SD_XFER_LEN: case CTL_SD_MEM_CARD_OPT: case CTL_TRANSACTION_CTL: case CTL_DMA_ENABLE: return sh_mobile_sdhi_wait_idle(host); } return 0; } static void sh_mobile_sdhi_cd_wakeup(const 
struct platform_device *pdev) { mmc_detect_change(dev_get_drvdata(&pdev->dev), msecs_to_jiffies(100)); } static const struct sh_mobile_sdhi_ops sdhi_ops = { .cd_wakeup = sh_mobile_sdhi_cd_wakeup, }; static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) { struct sh_mobile_sdhi *priv; struct tmio_mmc_data *mmc_data; struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct tmio_mmc_host *host; char clk_name[8]; int irq, ret, i = 0; bool multiplexed_isr = true; priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); if (priv == NULL) { dev_err(&pdev->dev, "kzalloc failed\n"); return -ENOMEM; } mmc_data = &priv->mmc_data; p->pdata = mmc_data; if (p->init) { ret = p->init(pdev, &sdhi_ops); if (ret) goto einit; } snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); priv->clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(priv->clk)) { dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); ret = PTR_ERR(priv->clk); goto eclkget; } mmc_data->hclk = clk_get_rate(priv->clk); mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; mmc_data->get_cd = sh_mobile_sdhi_get_cd; mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; if (p) { mmc_data->flags = p->tmio_flags; if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; mmc_data->ocr_mask = p->tmio_ocr_mask; mmc_data->capabilities |= p->tmio_caps; mmc_data->cd_gpio = p->cd_gpio; if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { priv->param_tx.slave_id = p->dma_slave_tx; priv->param_rx.slave_id = p->dma_slave_rx; priv->dma_priv.chan_priv_tx = &priv->param_tx; priv->dma_priv.chan_priv_rx = &priv->param_rx; priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ mmc_data->dma = &priv->dma_priv; } } /* * All SDHI blocks support 2-byte and larger block sizes in 4-bit * bus width mode. */ mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES; /* * All SDHI blocks support SDIO IRQ signalling. 
*/ mmc_data->flags |= TMIO_MMC_SDIO_IRQ; ret = tmio_mmc_host_probe(&host, pdev, mmc_data); if (ret < 0) goto eprobe; /* * Allow one or more specific (named) ISRs or * one or more multiplexed (un-named) ISRs. */ irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); if (irq >= 0) { multiplexed_isr = false; ret = request_irq(irq, tmio_mmc_card_detect_irq, 0, dev_name(&pdev->dev), host); if (ret) goto eirq_card_detect; } irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); if (irq >= 0) { multiplexed_isr = false; ret = request_irq(irq, tmio_mmc_sdio_irq, 0, dev_name(&pdev->dev), host); if (ret) goto eirq_sdio; } irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD); if (irq >= 0) { multiplexed_isr = false; ret = request_irq(irq, tmio_mmc_sdcard_irq, 0, dev_name(&pdev->dev), host); if (ret) goto eirq_sdcard; } else if (!multiplexed_isr) { dev_err(&pdev->dev, "Principal SD-card IRQ is missing among named interrupts\n"); ret = irq; goto eirq_sdcard; } if (multiplexed_isr) { while (1) { irq = platform_get_irq(pdev, i); if (irq < 0) break; i++; ret = request_irq(irq, tmio_mmc_irq, 0, dev_name(&pdev->dev), host); if (ret) goto eirq_multiplexed; } /* There must be at least one IRQ source */ if (!i) goto eirq_multiplexed; } dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", mmc_hostname(host->mmc), (unsigned long) (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start), mmc_data->hclk / 1000000); return ret; eirq_multiplexed: while (i--) { irq = platform_get_irq(pdev, i); free_irq(irq, host); } eirq_sdcard: irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); if (irq >= 0) free_irq(irq, host); eirq_sdio: irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); if (irq >= 0) free_irq(irq, host); eirq_card_detect: tmio_mmc_host_remove(host); eprobe: clk_put(priv->clk); eclkget: if (p->cleanup) p->cleanup(pdev); einit: kfree(priv); return ret; } static int sh_mobile_sdhi_remove(struct platform_device *pdev) { 
struct mmc_host *mmc = platform_get_drvdata(pdev); struct tmio_mmc_host *host = mmc_priv(mmc); struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; int i = 0, irq; p->pdata = NULL; tmio_mmc_host_remove(host); while (1) { irq = platform_get_irq(pdev, i++); if (irq < 0) break; free_irq(irq, host); } clk_put(priv->clk); if (p->cleanup) p->cleanup(pdev); kfree(priv); return 0; } static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { .suspend = tmio_mmc_host_suspend, .resume = tmio_mmc_host_resume, .runtime_suspend = tmio_mmc_host_runtime_suspend, .runtime_resume = tmio_mmc_host_runtime_resume, }; static struct platform_driver sh_mobile_sdhi_driver = { .driver = { .name = "sh_mobile_sdhi", .owner = THIS_MODULE, .pm = &tmio_mmc_dev_pm_ops, }, .probe = sh_mobile_sdhi_probe, .remove = __devexit_p(sh_mobile_sdhi_remove), }; module_platform_driver(sh_mobile_sdhi_driver); MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:sh_mobile_sdhi");
gpl-2.0
monishk10/kernel_cancro
arch/arm/mach-ux500/usb.c
4851
3980
/*
 * Copyright (C) ST-Ericsson SA 2011
 *
 * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
 * License terms: GNU General Public License (GPL) version 2
 */
/*
 * Registration of the ux500 MUSB (USB OTG) controller and its DMA40
 * channel configuration.  Board code calls ux500_add_usb() with the
 * controller base address, IRQ and per-channel DMA event lines.
 */
#include <linux/platform_device.h>
#include <linux/usb/musb.h>
#include <linux/dma-mapping.h>

#include <plat/ste_dma40.h>
#include <mach/hardware.h>
#include <mach/usb.h>

/*
 * Template initializers for DMA40 channels used by MUSB: logical-mode
 * channels with 32-bit word data width and burst size 16.  RX moves
 * data peripheral -> memory, TX memory -> peripheral.  The device-side
 * event line (src_dev_type for RX, dst_dev_type for TX) is filled in
 * later by ux500_usb_dma_update_*_ch_config().
 */
#define MUSB_DMA40_RX_CH { \
		.mode = STEDMA40_MODE_LOGICAL, \
		.dir = STEDMA40_PERIPH_TO_MEM, \
		.dst_dev_type = STEDMA40_DEV_DST_MEMORY, \
		.src_info.data_width = STEDMA40_WORD_WIDTH, \
		.dst_info.data_width = STEDMA40_WORD_WIDTH, \
		.src_info.psize = STEDMA40_PSIZE_LOG_16, \
		.dst_info.psize = STEDMA40_PSIZE_LOG_16, \
	}

#define MUSB_DMA40_TX_CH { \
		.mode = STEDMA40_MODE_LOGICAL, \
		.dir = STEDMA40_MEM_TO_PERIPH, \
		.src_dev_type = STEDMA40_DEV_SRC_MEMORY, \
		.src_info.data_width = STEDMA40_WORD_WIDTH, \
		.dst_info.data_width = STEDMA40_WORD_WIDTH, \
		.src_info.psize = STEDMA40_PSIZE_LOG_16, \
		.dst_info.psize = STEDMA40_PSIZE_LOG_16, \
	}

/* One DMA40 config per MUSB DMA channel (8 RX + 8 TX). */
static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS]
	= {
	MUSB_DMA40_RX_CH,
	MUSB_DMA40_RX_CH,
	MUSB_DMA40_RX_CH,
	MUSB_DMA40_RX_CH,
	MUSB_DMA40_RX_CH,
	MUSB_DMA40_RX_CH,
	MUSB_DMA40_RX_CH,
	MUSB_DMA40_RX_CH
};

static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_TX_CHANNELS]
	= {
	MUSB_DMA40_TX_CH,
	MUSB_DMA40_TX_CH,
	MUSB_DMA40_TX_CH,
	MUSB_DMA40_TX_CH,
	MUSB_DMA40_TX_CH,
	MUSB_DMA40_TX_CH,
	MUSB_DMA40_TX_CH,
	MUSB_DMA40_TX_CH,
};

/*
 * Opaque per-channel parameters handed to the MUSB glue; each entry
 * points at the corresponding stedma40_chan_cfg above and is matched
 * by the dma_filter (stedma40_filter) at channel-request time.
 */
static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_CHANNELS] = {
	&musb_dma_rx_ch[0],
	&musb_dma_rx_ch[1],
	&musb_dma_rx_ch[2],
	&musb_dma_rx_ch[3],
	&musb_dma_rx_ch[4],
	&musb_dma_rx_ch[5],
	&musb_dma_rx_ch[6],
	&musb_dma_rx_ch[7]
};

static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_TX_CHANNELS] = {
	&musb_dma_tx_ch[0],
	&musb_dma_tx_ch[1],
	&musb_dma_tx_ch[2],
	&musb_dma_tx_ch[3],
	&musb_dma_tx_ch[4],
	&musb_dma_tx_ch[5],
	&musb_dma_tx_ch[6],
	&musb_dma_tx_ch[7]
};

static struct ux500_musb_board_data musb_board_data = {
	.dma_rx_param_array = ux500_dma_rx_param_array,
	.dma_tx_param_array = ux500_dma_tx_param_array,
	.num_rx_channels = UX500_MUSB_DMA_NUM_RX_CHANNELS,
	.num_tx_channels = UX500_MUSB_DMA_NUM_TX_CHANNELS,
	.dma_filter = stedma40_filter,
};

/* The controller can address the full 32-bit space for DMA. */
static u64 ux500_musb_dmamask = DMA_BIT_MASK(32);

static struct musb_hdrc_config musb_hdrc_config = {
	.multipoint	= true,
	.dyn_fifo	= true,
	.num_eps	= 16,
	.ram_bits	= 16,
};

static struct musb_hdrc_platform_data musb_platform_data = {
	.mode = MUSB_OTG,
	.config = &musb_hdrc_config,
	.board_data = &musb_board_data,
};

/* start/end are filled in by ux500_add_usb() before registration. */
static struct resource usb_resources[] = {
	[0] = {
		.name	= "usb-mem",
		.flags	=  IORESOURCE_MEM,
	},
	[1] = {
		.name   = "mc", /* hard-coded in musb */
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device ux500_musb_device = {
	.name = "musb-ux500",
	.id = 0,
	.dev = {
		.platform_data = &musb_platform_data,
		.dma_mask = &ux500_musb_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
	.num_resources = ARRAY_SIZE(usb_resources),
	.resource = usb_resources,
};

/*
 * Patch the board-specific DMA event line into each RX channel config.
 * src_dev_type[] must hold at least UX500_MUSB_DMA_NUM_RX_CHANNELS
 * entries (caller-supplied, not validated here).
 */
static inline void ux500_usb_dma_update_rx_ch_config(int *src_dev_type)
{
	u32 idx;

	for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_CHANNELS; idx++)
		musb_dma_rx_ch[idx].src_dev_type = src_dev_type[idx];
}

/* Same as above for the TX direction (destination event lines). */
static inline void ux500_usb_dma_update_tx_ch_config(int *dst_dev_type)
{
	u32 idx;

	for (idx = 0; idx < UX500_MUSB_DMA_NUM_TX_CHANNELS; idx++)
		musb_dma_tx_ch[idx].dst_dev_type = dst_dev_type[idx];
}

/*
 * Register the MUSB controller.
 * @parent:     parent device for the new platform device
 * @base:       physical base of the controller (64 KiB window assumed)
 * @irq:        the controller interrupt
 * @dma_rx_cfg: per-channel RX DMA event lines
 * @dma_tx_cfg: per-channel TX DMA event lines
 */
void ux500_add_usb(struct device *parent, resource_size_t base, int irq,
		   int *dma_rx_cfg, int *dma_tx_cfg)
{
	ux500_musb_device.resource[0].start = base;
	ux500_musb_device.resource[0].end = base + SZ_64K - 1;
	ux500_musb_device.resource[1].start = irq;
	ux500_musb_device.resource[1].end = irq;

	ux500_usb_dma_update_rx_ch_config(dma_rx_cfg);
	ux500_usb_dma_update_tx_ch_config(dma_tx_cfg);

	ux500_musb_device.dev.parent = parent;

	platform_device_register(&ux500_musb_device);
}
gpl-2.0
Validus-Kernel/android_kernel_moto_shamu
arch/cris/arch-v10/kernel/debugport.c
6899
12429
/* Serialport functions for debugging
 *
 * Copyright (c) 2000-2007 Axis Communications AB
 *
 * Authors: Bjorn Wesen
 *
 * Exports:
 * console_print_etrax(char *buf)
 * int getDebugChar()
 * putDebugChar(int)
 * enableDebugIRQ()
 * init_etrax_debug()
 *
 */

#include <linux/console.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <arch/svinto.h>
#include <asm/io.h>             /* Get SIMCOUT. */

extern void reset_watchdog(void);

/* Everything needed to drive one ETRAX on-chip serial port by direct
 * polled register access: register pointers, IRQ mask bit and the
 * line parameters to program. */
struct dbg_port
{
	unsigned int index;
	const volatile unsigned* read;
	volatile char* write;
	volatile unsigned* xoff;
	volatile char* baud;
	volatile char* tr_ctrl;
	volatile char* rec_ctrl;
	unsigned long irq;
	unsigned int started;	/* nonzero once start_port() has run */
	unsigned long baudrate;
	unsigned char parity;	/* 'N', 'E' or 'O' */
	unsigned int bits;	/* 7 or 8 data bits */
};

/* Static descriptors for the four ETRAX 100 serial ports. */
struct dbg_port ports[] =
{
	{
		0,
		R_SERIAL0_READ,
		R_SERIAL0_TR_DATA,
		R_SERIAL0_XOFF,
		R_SERIAL0_BAUD,
		R_SERIAL0_TR_CTRL,
		R_SERIAL0_REC_CTRL,
		IO_STATE(R_IRQ_MASK1_SET, ser0_data, set),
		0,
		115200, 'N', 8
	},
	{
		1,
		R_SERIAL1_READ,
		R_SERIAL1_TR_DATA,
		R_SERIAL1_XOFF,
		R_SERIAL1_BAUD,
		R_SERIAL1_TR_CTRL,
		R_SERIAL1_REC_CTRL,
		IO_STATE(R_IRQ_MASK1_SET, ser1_data, set),
		0,
		115200, 'N', 8
	},
	{
		2,
		R_SERIAL2_READ,
		R_SERIAL2_TR_DATA,
		R_SERIAL2_XOFF,
		R_SERIAL2_BAUD,
		R_SERIAL2_TR_CTRL,
		R_SERIAL2_REC_CTRL,
		IO_STATE(R_IRQ_MASK1_SET, ser2_data, set),
		0,
		115200, 'N', 8
	},
	{
		3,
		R_SERIAL3_READ,
		R_SERIAL3_TR_DATA,
		R_SERIAL3_XOFF,
		R_SERIAL3_BAUD,
		R_SERIAL3_TR_CTRL,
		R_SERIAL3_REC_CTRL,
		IO_STATE(R_IRQ_MASK1_SET, ser3_data, set),
		0,
		115200, 'N', 8
	}
};

#ifdef CONFIG_ETRAX_SERIAL
extern struct tty_driver *serial_driver;
#endif

/* Compile-time selection of the console debug port (NULL = none). */
struct dbg_port* port =
#if defined(CONFIG_ETRAX_DEBUG_PORT0)
	&ports[0];
#elif defined(CONFIG_ETRAX_DEBUG_PORT1)
	&ports[1];
#elif defined(CONFIG_ETRAX_DEBUG_PORT2)
	&ports[2];
#elif defined(CONFIG_ETRAX_DEBUG_PORT3)
	&ports[3];
#else
	NULL;
#endif

/* Compile-time selection of the KGDB port (NULL = none). */
static struct dbg_port* kgdb_port =
#if defined(CONFIG_ETRAX_KGDB_PORT0)
	&ports[0];
#elif defined(CONFIG_ETRAX_KGDB_PORT1)
	&ports[1];
#elif defined(CONFIG_ETRAX_KGDB_PORT2)
	&ports[2];
#elif defined(CONFIG_ETRAX_KGDB_PORT3)
	&ports[3];
#else
	NULL;
#endif

/*
 * Program a serial port for polled debug use: claim the pins from the
 * DMA/par-port functions in R_GEN_CONFIG, then set XOFF off, baudrate,
 * parity, word length, and finally enable receiver and transmitter.
 * Idempotent via p->started.
 */
static void
start_port(struct dbg_port* p)
{
	unsigned long rec_ctrl = 0;
	unsigned long tr_ctrl = 0;

	if (!p)
		return;

	if (p->started)
		return;
	p->started = 1;

	/* Steer the shared pin/DMA muxing away from this port's DMA
	 * channels so the port can be used in polled mode. */
	if (p->index == 0)
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, unused);
	}
	else if (p->index == 1)
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, usb);
	}
	else if (p->index == 2)
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, par0);
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, par0);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, ser2, select);
	}
	else
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, par1);
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, par1);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, ser3, select);
	}

	*R_GEN_CONFIG = genconfig_shadow;

	/* Disable software flow control. */
	*p->xoff =
		IO_STATE(R_SERIAL0_XOFF, tx_stop, enable) |
		IO_STATE(R_SERIAL0_XOFF, auto_xoff, disable) |
		IO_FIELD(R_SERIAL0_XOFF, xoff_char, 0);

	switch (p->baudrate)
	{
	case 0:
	case 115200:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c115k2Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c115k2Hz);
		break;
	case 1200:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c1200Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c1200Hz);
		break;
	case 2400:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c2400Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c2400Hz);
		break;
	case 4800:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c4800Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c4800Hz);
		break;
	case 9600:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c9600Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c9600Hz);
		break;
	case 19200:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c19k2Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c19k2Hz);
		break;
	case 38400:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c38k4Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c38k4Hz);
		break;
	case 57600:
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c57k6Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c57k6Hz);
		break;
	default:
		/* Unknown rate: fall back to 115200. */
		*p->baud =
			IO_STATE(R_SERIAL0_BAUD, tr_baud, c115k2Hz) |
			IO_STATE(R_SERIAL0_BAUD, rec_baud, c115k2Hz);
		break;
	}

	if (p->parity == 'E') {
		rec_ctrl =
			IO_STATE(R_SERIAL0_REC_CTRL, rec_par, even) |
			IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
		tr_ctrl =
			IO_STATE(R_SERIAL0_TR_CTRL, tr_par, even) |
			IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
	} else if (p->parity == 'O') {
		rec_ctrl =
			IO_STATE(R_SERIAL0_REC_CTRL, rec_par, odd) |
			IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
		tr_ctrl =
			IO_STATE(R_SERIAL0_TR_CTRL, tr_par, odd) |
			IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
	} else {
		/* No parity. */
		rec_ctrl =
			IO_STATE(R_SERIAL0_REC_CTRL, rec_par, even) |
			IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, disable);
		tr_ctrl =
			IO_STATE(R_SERIAL0_TR_CTRL, tr_par, even) |
			IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, disable);
	}
	if (p->bits == 7)
	{
		rec_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_7bit);
		tr_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_7bit);
	}
	else
	{
		rec_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_8bit);
		tr_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_8bit);
	}

	*p->rec_ctrl =
		IO_STATE(R_SERIAL0_REC_CTRL, dma_err, stop) |
		IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable) |
		IO_STATE(R_SERIAL0_REC_CTRL, rts_, active) |
		IO_STATE(R_SERIAL0_REC_CTRL, sampling, middle) |
		IO_STATE(R_SERIAL0_REC_CTRL, rec_stick_par, normal) |
		rec_ctrl;

	*p->tr_ctrl =
		IO_FIELD(R_SERIAL0_TR_CTRL, txd, 0) |
		IO_STATE(R_SERIAL0_TR_CTRL, tr_enable, enable) |
		IO_STATE(R_SERIAL0_TR_CTRL, auto_cts, disabled) |
		IO_STATE(R_SERIAL0_TR_CTRL, stop_bits, one_bit) |
		IO_STATE(R_SERIAL0_TR_CTRL, tr_stick_par, normal) |
		tr_ctrl;
}

/* Busy-wait output of a buffer on the debug port with LF -> CRLF
 * translation, interrupts disabled for the duration. */
static void
console_write_direct(struct console *co, const char *buf, unsigned int len)
{
	int i;
	unsigned long flags;

	if (!port)
		return;

	local_irq_save(flags);

	/* Send data */
	for (i = 0; i < len; i++) {
		/* LF -> CRLF */
		if (buf[i] == '\n') {
			while (!(*port->read & IO_MASK(R_SERIAL0_READ, tr_ready)))
				;
			*port->write = '\r';
		}
		/* Wait until transmitter is ready and send.*/
		while (!(*port->read & IO_MASK(R_SERIAL0_READ, tr_ready)))
			;
		*port->write = buf[i];
	}

	/*
	 * Feed the watchdog, otherwise it will reset the chip during boot.
	 * The time to send an ordinary boot message line (10-90 chars)
	 * varies between 1-8ms at 115200. What makes up for the additional
	 * 90ms that allows the watchdog to bite?
	 */
	reset_watchdog();

	local_irq_restore(flags);
}

/* struct console .write hook; routes to the simulator or the real port. */
static void
console_write(struct console *co, const char *buf, unsigned int len)
{
	if (!port)
		return;

#ifdef CONFIG_SVINTO_SIM
	/* no use to simulate the serial debug output */
	SIMCOUT(buf, len);
	return;
#endif

	console_write_direct(co, buf, len);
}

/* legacy function */
void
console_print_etrax(const char *buf)
{
	console_write(NULL, buf, strlen(buf));
}

/* Use polling to get a single character FROM the debug port */
int
getDebugChar(void)
{
	unsigned long readval;

	if (!kgdb_port)
		return 0;

	do {
		readval = *kgdb_port->read;
	} while (!(readval & IO_MASK(R_SERIAL0_READ, data_avail)));

	return (readval & IO_MASK(R_SERIAL0_READ, data_in));
}

/* Use polling to put a single character to the debug port */
void
putDebugChar(int val)
{
	if (!kgdb_port)
		return;

	while (!(*kgdb_port->read & IO_MASK(R_SERIAL0_READ, tr_ready)))
		;
	*kgdb_port->write = val;
}

/* Enable irq for receiving chars on the debug port, used by kgdb */
void
enableDebugIRQ(void)
{
	if (!kgdb_port)
		return;

	*R_IRQ_MASK1_SET = kgdb_port->irq;
	/* use R_VECT_MASK directly, since we really bypass Linux normal
	 * IRQ handling in kgdb anyway, we don't need to use enable_irq
	 */
	*R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);

	*kgdb_port->rec_ctrl = IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable);
}

/*
 * struct console .setup hook: parse "console=ttySN,BBBBPD" options
 * into the selected port descriptor.
 */
static int __init
console_setup(struct console *co, char *options)
{
	char* s;

	if (options) {
		port = &ports[co->index];
		port->baudrate = 115200;
		port->parity = 'N';
		port->bits = 8;
		port->baudrate = simple_strtoul(options, NULL, 10);
		s = options;
		while (*s >= '0' && *s <= '9')
			s++;
		if (*s)
			port->parity = *s++;
		if (*s)
			port->bits = *s++ - '0';
		port->started = 0;
		/*
		 * NOTE(review): start_port(0) passes NULL, and start_port()
		 * returns immediately on NULL — so the freshly configured
		 * port is never (re)started here.  Presumably this should
		 * be start_port(port); confirm before changing.
		 */
		start_port(0);
	}
	return 0;
}

/* This is a dummy serial device that throws away anything written to it.
 * This is used when no debug output is wanted.
 */
static struct tty_driver dummy_driver;

static int dummy_open(struct tty_struct *tty, struct file * filp)
{
	return 0;
}

static void dummy_close(struct tty_struct *tty, struct file * filp)
{
}

static int dummy_write(struct tty_struct * tty,
		       const unsigned char *buf, int count)
{
	/* Pretend everything was written. */
	return count;
}

static int
dummy_write_room(struct tty_struct *tty)
{
	return 8192;
}

static const struct tty_operations dummy_ops = {
	.open = dummy_open,
	.close = dummy_close,
	.write = dummy_write,
	.write_room = dummy_write_room,
};

/* Register the do-nothing tty driver above as ttyS. */
void __init
init_dummy_console(void)
{
	memset(&dummy_driver, 0, sizeof(struct tty_driver));
	dummy_driver.driver_name = "serial";
	dummy_driver.name = "ttyS";
	dummy_driver.major = TTY_MAJOR;
	dummy_driver.minor_start = 68;
	dummy_driver.num = 1;       /* etrax100 has 4 serial ports */
	dummy_driver.type = TTY_DRIVER_TYPE_SERIAL;
	dummy_driver.subtype = SERIAL_TYPE_NORMAL;
	dummy_driver.init_termios = tty_std_termios;
	/* Normally B9600 default... */
	dummy_driver.init_termios.c_cflag =
		B115200 | CS8 | CREAD | HUPCL | CLOCAL;
	dummy_driver.flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	dummy_driver.init_termios.c_ispeed = 115200;
	dummy_driver.init_termios.c_ospeed = 115200;
	dummy_driver.ops = &dummy_ops;
	if (tty_register_driver(&dummy_driver))
		panic("Couldn't register dummy serial driver\n");
}

/* struct console .device hook: hand back the real serial driver when
 * configured, otherwise the dummy driver. */
static struct tty_driver*
etrax_console_device(struct console* co, int *index)
{
	if (port)
		*index = port->index;
	else
		*index = 0;
#ifdef CONFIG_ETRAX_SERIAL
	return port ? serial_driver : &dummy_driver;
#else
	return &dummy_driver;
#endif
}

/* One console descriptor with dynamic index (-1) and one per port. */
static struct console sercons = {
	name : "ttyS",
	write: console_write,
	read : NULL,
	device : etrax_console_device,
	unblank : NULL,
	setup : console_setup,
	flags : CON_PRINTBUFFER,
	index : -1,
	cflag : 0,
	next : NULL
};
static struct console sercons0 = {
	name : "ttyS",
	write: console_write,
	read : NULL,
	device : etrax_console_device,
	unblank : NULL,
	setup : console_setup,
	flags : CON_PRINTBUFFER,
	index : 0,
	cflag : 0,
	next : NULL
};
static struct console sercons1 = {
	name : "ttyS",
	write: console_write,
	read : NULL,
	device : etrax_console_device,
	unblank : NULL,
	setup : console_setup,
	flags : CON_PRINTBUFFER,
	index : 1,
	cflag : 0,
	next : NULL
};
static struct console sercons2 = {
	name : "ttyS",
	write: console_write,
	read : NULL,
	device : etrax_console_device,
	unblank : NULL,
	setup : console_setup,
	flags : CON_PRINTBUFFER,
	index : 2,
	cflag : 0,
	next : NULL
};
static struct console sercons3 = {
	name : "ttyS",
	write: console_write,
	read : NULL,
	device : etrax_console_device,
	unblank : NULL,
	setup : console_setup,
	flags : CON_PRINTBUFFER,
	index : 3,
	cflag : 0,
	next : NULL
};

/*
 * Register console (for printk's etc)
 * First call registers the wildcard console and starts the configured
 * port(s); a second call replaces it with the four per-port consoles
 * and the dummy tty driver.
 */
int __init
init_etrax_debug(void)
{
	static int first = 1;

	if (!first) {
		unregister_console(&sercons);
		register_console(&sercons0);
		register_console(&sercons1);
		register_console(&sercons2);
		register_console(&sercons3);
		init_dummy_console();
		return 0;
	}

	first = 0;
	register_console(&sercons);
	start_port(port);

#ifdef CONFIG_ETRAX_KGDB
	start_port(kgdb_port);
#endif
	return 0;
}
__initcall(init_etrax_debug);
gpl-2.0
xiaokang1986/Kylin-kernel-3.4.10
drivers/isdn/sc/command.c
9203
8536
/* $Id: command.c,v 1.4.10.1 2001/09/23 22:24:59 kai Exp $
 *
 * Copyright (C) 1996 SpellCaster Telecommunications Inc.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * For more information, please contact gpl-info@spellcast.com or write:
 *
 *     SpellCaster Telecommunications Inc.
 *     5621 Finch Avenue East, Unit #3
 *     Scarborough, Ontario  Canada
 *     M1B 2T9
 *     +1 (416) 297-8565
 *     +1 (416) 297-6433 Facsimile
 */
/*
 * Dispatch of ISDN4Linux commands (isdn_ctrl) to the SpellCaster
 * adapter firmware, plus the per-command helpers that build and send
 * the corresponding firmware messages via sendmessage().
 */

#include <linux/module.h>
#include "includes.h"		/* This must be first */

#include "hardware.h"
#include "message.h"
#include "card.h"
#include "scioc.h"

static int dial(int card, unsigned long channel, setup_parm setup);
static int hangup(int card, unsigned long channel);
static int answer(int card, unsigned long channel);
static int clreaz(int card, unsigned long channel);
static int seteaz(int card, unsigned long channel, char *);
static int setl2(int card, unsigned long arg);
static int setl3(int card, unsigned long arg);
static int acceptb(int card, unsigned long channel);

#ifdef DEBUG
/*
 * Translate command codes to strings
 * NOTE(review): the second "ISDN_CMD_ACCEPTB" entry presumably should
 * read "ISDN_CMD_ACCEPTD" — debug-only table, verify against the
 * ISDN_CMD_* numbering before changing.
 */
static char *commands[] = { "ISDN_CMD_IOCTL",
			    "ISDN_CMD_DIAL",
			    "ISDN_CMD_ACCEPTB",
			    "ISDN_CMD_ACCEPTB",
			    "ISDN_CMD_HANGUP",
			    "ISDN_CMD_CLREAZ",
			    "ISDN_CMD_SETEAZ",
			    NULL,
			    NULL,
			    NULL,
			    "ISDN_CMD_SETL2",
			    NULL,
			    "ISDN_CMD_SETL3",
			    NULL,
			    NULL,
			    NULL,
			    NULL,
			    NULL, };

/*
 * Translates ISDN4Linux protocol codes to strings for debug messages
 */
static char *l3protos[] = { "ISDN_PROTO_L3_TRANS" };
static char *l2protos[] = { "ISDN_PROTO_L2_X75I",
			    "ISDN_PROTO_L2_X75UI",
			    "ISDN_PROTO_L2_X75BUI",
			    "ISDN_PROTO_L2_HDLC",
			    "ISDN_PROTO_L2_TRANS" };
#endif

/*
 * Map an ISDN4Linux driver id onto our adapter index, or -ENODEV if
 * no registered adapter matches.
 */
int get_card_from_id(int driver)
{
	int i;

	for (i = 0; i < cinst; i++) {
		if (sc_adapter[i]->driverId == driver)
			return i;
	}
	return -ENODEV;
}

/*
 * command
 *
 * Entry point for all isdn_ctrl commands from the ISDN4Linux layer;
 * validates the card id and dispatches to the per-command helper.
 */
int command(isdn_ctrl *cmd)
{
	int card;

	card = get_card_from_id(cmd->driver);
	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	/*
	 * Dispatch the command
	 */
	switch (cmd->command) {
	case ISDN_CMD_IOCTL:
	{
		unsigned long	cmdptr;
		scs_ioctl	ioc;

		/* parm.num carries a user-space pointer to the ioctl block */
		memcpy(&cmdptr, cmd->parm.num, sizeof(unsigned long));
		if (copy_from_user(&ioc, (scs_ioctl __user *)cmdptr,
				   sizeof(scs_ioctl))) {
			pr_debug("%s: Failed to verify user space 0x%lx\n",
				 sc_adapter[card]->devicename, cmdptr);
			return -EFAULT;
		}
		return sc_ioctl(card, &ioc);
	}
	case ISDN_CMD_DIAL:
		return dial(card, cmd->arg, cmd->parm.setup);
	case ISDN_CMD_HANGUP:
		return hangup(card, cmd->arg);
	case ISDN_CMD_ACCEPTD:
		return answer(card, cmd->arg);
	case ISDN_CMD_ACCEPTB:
		return acceptb(card, cmd->arg);
	case ISDN_CMD_CLREAZ:
		return clreaz(card, cmd->arg);
	case ISDN_CMD_SETEAZ:
		return seteaz(card, cmd->arg, cmd->parm.num);
	case ISDN_CMD_SETL2:
		return setl2(card, cmd->arg);
	case ISDN_CMD_SETL3:
		return setl3(card, cmd->arg);
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * start the onboard firmware
 */
int startproc(int card)
{
	int status;

	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	/*
	 * send start msg
	 */
	status = sendmessage(card, CMPID, cmReqType2,
			     cmReqClass0,
			     cmReqStartProc,
			     0, 0, NULL);
	pr_debug("%s: Sent startProc\n", sc_adapter[card]->devicename);

	return status;
}

/*
 * Dials the number passed in
 */
static int dial(int card, unsigned long channel, setup_parm setup)
{
	int status;
	char Phone[48];

	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	/*extract ISDN number to dial from eaz/msn string*/
	strcpy(Phone, setup.phone);

	/*send the connection message*/
	status = sendmessage(card, CEPID, ceReqTypePhy,
			     ceReqClass1,
			     ceReqPhyConnect,
			     (unsigned char)channel + 1,
			     strlen(Phone),
			     (unsigned int *)Phone);

	pr_debug("%s: Dialing %s on channel %lu\n",
		 sc_adapter[card]->devicename, Phone, channel + 1);

	return status;
}

/*
 * Answer an incoming call
 * NOTE(review): hangup() itself adds 1 to the channel it is given, so
 * passing channel + 1 here looks like a double increment — confirm
 * against the firmware channel numbering before changing.
 */
static int answer(int card, unsigned long channel)
{
	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	if (setup_buffers(card, channel + 1)) {
		hangup(card, channel + 1);
		return -ENOBUFS;
	}

	indicate_status(card, ISDN_STAT_BCONN, channel, NULL);
	pr_debug("%s: Answered incoming call on channel %lu\n",
		 sc_adapter[card]->devicename, channel + 1);
	return 0;
}

/*
 * Hangup up the call on specified channel
 */
static int hangup(int card, unsigned long channel)
{
	int status;

	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	status = sendmessage(card, CEPID, ceReqTypePhy,
			     ceReqClass1,
			     ceReqPhyDisconnect,
			     (unsigned char)channel + 1,
			     0,
			     NULL);
	pr_debug("%s: Sent HANGUP message to channel %lu\n",
		 sc_adapter[card]->devicename, channel + 1);
	return status;
}

/*
 * Set the layer 2 protocol (X.25, HDLC, Raw)
 */
static int setl2(int card, unsigned long arg)
{
	int status = 0;
	int protocol, channel;

	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}
	/* arg encodes protocol in the high byte, channel in the low byte */
	protocol = arg >> 8;
	channel = arg & 0xff;
	sc_adapter[card]->channel[channel].l2_proto = protocol;

	/*
	 * check that the adapter is also set to the correct protocol
	 */
	pr_debug("%s: Sending GetFrameFormat for channel %d\n",
		 sc_adapter[card]->devicename, channel + 1);
	/*
	 * NOTE(review): this casts the integer value of 'protocol' to a
	 * data pointer; sendmessage() presumably reads 1 word from that
	 * address, which would be a wild read.  Looks like it should be
	 * &protocol — verify against sendmessage()'s contract.
	 */
	status = sendmessage(card, CEPID, ceReqTypeCall,
			     ceReqClass0,
			     ceReqCallGetFrameFormat,
			     (unsigned char)channel + 1,
			     1,
			     (unsigned int *)protocol);
	if (status)
		return status;
	return 0;
}

/*
 * Set the layer 3 protocol
 * NOTE(review): unlike setl2(), the low-byte mask is not applied here,
 * so channel[] is indexed with protocol bits still in the high byte.
 * Harmless only while the sole L3 protocol code is 0 — confirm.
 */
static int setl3(int card, unsigned long channel)
{
	int protocol = channel >> 8;

	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	sc_adapter[card]->channel[channel].l3_proto = protocol;
	return 0;
}

/*
 * Accept an incoming B-channel connection.
 * NOTE(review): same channel + 1 double-increment question as answer().
 */
static int acceptb(int card, unsigned long channel)
{
	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	if (setup_buffers(card, channel + 1)) {
		hangup(card, channel + 1);
		return -ENOBUFS;
	}

	pr_debug("%s: B-Channel connection accepted on channel %lu\n",
		 sc_adapter[card]->devicename, channel + 1);
	indicate_status(card, ISDN_STAT_BCONN, channel, NULL);
	return 0;
}

/* Clear the EAZ (address) list for the given channel. */
static int clreaz(int card, unsigned long arg)
{
	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	strcpy(sc_adapter[card]->channel[arg].eazlist, "");
	sc_adapter[card]->channel[arg].eazclear = 1;
	pr_debug("%s: EAZ List cleared for channel %lu\n",
		 sc_adapter[card]->devicename, arg + 1);
	return 0;
}

/* Install a new EAZ list for the given channel (unbounded strcpy —
 * relies on the ISDN layer limiting the length of 'num'). */
static int seteaz(int card, unsigned long arg, char *num)
{
	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	strcpy(sc_adapter[card]->channel[arg].eazlist, num);
	sc_adapter[card]->channel[arg].eazclear = 0;
	pr_debug("%s: EAZ list for channel %lu set to: %s\n",
		 sc_adapter[card]->devicename, arg + 1,
		 sc_adapter[card]->channel[arg].eazlist);
	return 0;
}

/*
 * Soft-reset the adapter: stop the higher layers, arm the reset-check
 * timer under the adapter lock, then poke the reset port.
 */
int reset(int card)
{
	unsigned long flags;

	if (!IS_VALID_CARD(card)) {
		pr_debug("Invalid param: %d is not a valid card id\n", card);
		return -ENODEV;
	}

	indicate_status(card, ISDN_STAT_STOP, 0, NULL);

	if (sc_adapter[card]->EngineUp) {
		del_timer(&sc_adapter[card]->stat_timer);
	}

	sc_adapter[card]->EngineUp = 0;

	spin_lock_irqsave(&sc_adapter[card]->lock, flags);
	init_timer(&sc_adapter[card]->reset_timer);
	sc_adapter[card]->reset_timer.function = sc_check_reset;
	sc_adapter[card]->reset_timer.data = card;
	sc_adapter[card]->reset_timer.expires = jiffies + CHECKRESET_TIME;
	add_timer(&sc_adapter[card]->reset_timer);
	spin_unlock_irqrestore(&sc_adapter[card]->lock, flags);

	outb(0x1, sc_adapter[card]->ioport[SFT_RESET]);

	pr_debug("%s: Adapter Reset\n", sc_adapter[card]->devicename);
	return 0;
}

/* Drain any pending bytes from the adapter's read FIFO. */
void flushreadfifo(int card)
{
	while (inb(sc_adapter[card]->ioport[FIFO_STATUS]) & RF_HAS_DATA)
		inb(sc_adapter[card]->ioport[FIFO_READ]);
}
gpl-2.0
AOKP/kernel_lge_msm8974
arch/arm/plat-iop/i2c.c
9715
1783
/* * arch/arm/plat-iop/i2c.c * * Author: Nicolas Pitre <nico@cam.org> * Copyright (C) 2001 MontaVista Software, Inc. * Copyright (C) 2004 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mm.h> #include <linux/init.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_core.h> #include <linux/io.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/mach/map.h> #include <asm/setup.h> #include <asm/memory.h> #include <mach/hardware.h> #include <asm/hardware/iop3xx.h> #include <asm/mach/arch.h> #ifdef CONFIG_ARCH_IOP32X #define IRQ_IOP3XX_I2C_0 IRQ_IOP32X_I2C_0 #define IRQ_IOP3XX_I2C_1 IRQ_IOP32X_I2C_1 #endif #ifdef CONFIG_ARCH_IOP33X #define IRQ_IOP3XX_I2C_0 IRQ_IOP33X_I2C_0 #define IRQ_IOP3XX_I2C_1 IRQ_IOP33X_I2C_1 #endif static struct resource iop3xx_i2c0_resources[] = { [0] = { .start = 0xfffff680, .end = 0xfffff697, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP3XX_I2C_0, .end = IRQ_IOP3XX_I2C_0, .flags = IORESOURCE_IRQ, }, }; struct platform_device iop3xx_i2c0_device = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = iop3xx_i2c0_resources, }; static struct resource iop3xx_i2c1_resources[] = { [0] = { .start = 0xfffff6a0, .end = 0xfffff6b7, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP3XX_I2C_1, .end = IRQ_IOP3XX_I2C_1, .flags = IORESOURCE_IRQ, } }; struct platform_device iop3xx_i2c1_device = { .name = "IOP3xx-I2C", .id = 1, .num_resources = 2, .resource = iop3xx_i2c1_resources, };
gpl-2.0
DirtyUnicorns/android_kernel_htc_msm8660-caf
arch/x86/pci/irq.c
10483
33408
/* * Low-Level PCI Support for PC -- Routing of Interrupts * * (c) 1999--2000 Martin Mares <mj@ucw.cz> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/dmi.h> #include <linux/io.h> #include <linux/smp.h> #include <asm/io_apic.h> #include <linux/irq.h> #include <linux/acpi.h> #include <asm/pci_x86.h> #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) #define PIRQ_VERSION 0x0100 static int broken_hp_bios_irq9; static int acer_tm360_irqrouting; static struct irq_routing_table *pirq_table; static int pirq_enable_irq(struct pci_dev *dev); /* * Never use: 0, 1, 2 (timer, keyboard, and cascade) * Avoid using: 13, 14 and 15 (FP error and IDE). * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse) */ unsigned int pcibios_irq_mask = 0xfff8; static int pirq_penalty[16] = { 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000, 0, 0, 0, 0, 1000, 100000, 100000, 100000 }; struct irq_router { char *name; u16 vendor, device; int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq); int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); }; struct irq_router_handler { u16 vendor; int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device); }; int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq; void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; /* * Check passed address for the PCI IRQ Routing Table signature * and perform checksum verification. 
*/ static inline struct irq_routing_table *pirq_check_routing_table(u8 *addr) { struct irq_routing_table *rt; int i; u8 sum; rt = (struct irq_routing_table *) addr; if (rt->signature != PIRQ_SIGNATURE || rt->version != PIRQ_VERSION || rt->size % 16 || rt->size < sizeof(struct irq_routing_table)) return NULL; sum = 0; for (i = 0; i < rt->size; i++) sum += addr[i]; if (!sum) { DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt); return rt; } return NULL; } /* * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table. */ static struct irq_routing_table * __init pirq_find_routing_table(void) { u8 *addr; struct irq_routing_table *rt; if (pirq_table_addr) { rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr)); if (rt) return rt; printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n"); } for (addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) { rt = pirq_check_routing_table(addr); if (rt) return rt; } return NULL; } /* * If we have a IRQ routing table, use it to search for peer host * bridges. It's a gross hack, but since there are no other known * ways how to get a list of buses, we have to go this way. 
*/ static void __init pirq_peer_trick(void) { struct irq_routing_table *rt = pirq_table; u8 busmap[256]; int i; struct irq_info *e; memset(busmap, 0, sizeof(busmap)); for (i = 0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) { e = &rt->slots[i]; #ifdef DEBUG { int j; DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot); for (j = 0; j < 4; j++) DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap); DBG("\n"); } #endif busmap[e->bus] = 1; } for (i = 1; i < 256; i++) { int node; if (!busmap[i] || pci_find_bus(0, i)) continue; node = get_mp_bus_to_node(i); if (pci_scan_bus_on_node(i, &pci_root_ops, node)) printk(KERN_INFO "PCI: Discovered primary peer " "bus %02x [IRQ]\n", i); } pcibios_last_bus = -1; } /* * Code for querying and setting of IRQ routes on various interrupt routers. */ void eisa_set_level_irq(unsigned int irq) { unsigned char mask = 1 << (irq & 7); unsigned int port = 0x4d0 + (irq >> 3); unsigned char val; static u16 eisa_irq_mask; if (irq >= 16 || (1 << irq) & eisa_irq_mask) return; eisa_irq_mask |= (1 << irq); printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq); val = inb(port); if (!(val & mask)) { DBG(KERN_DEBUG " -> edge"); outb(val | mask, port); } } /* * Common IRQ routing practice: nibbles in config space, * offset by some magic constant. */ static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) { u8 x; unsigned reg = offset + (nr >> 1); pci_read_config_byte(router, reg, &x); return (nr & 1) ? (x >> 4) : (x & 0xf); } static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val) { u8 x; unsigned reg = offset + (nr >> 1); pci_read_config_byte(router, reg, &x); x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val); pci_write_config_byte(router, reg, x); } /* * ALI pirq entries are damn ugly, and completely undocumented. 
* This has been figured out from pirq tables, and it's not a pretty * picture. */ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; WARN_ON_ONCE(pirq > 16); return irqmap[read_config_nybble(router, 0x48, pirq-1)]; } static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; unsigned int val = irqmap[irq]; WARN_ON_ONCE(pirq > 16); if (val) { write_config_nybble(router, 0x48, pirq-1, val); return 1; } return 0; } /* * The Intel PIIX4 pirq rules are fairly simple: "pirq" is * just a pointer to the config space. */ static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; pci_read_config_byte(router, pirq, &x); return (x < 16) ? x : 0; } static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { pci_write_config_byte(router, pirq, irq); return 1; } /* * The VIA pirq rules are nibble-based, like ALI, * but without the ugly irq number munging. * However, PIRQD is in the upper instead of lower 4 bits. */ static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq); } static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq); return 1; } /* * The VIA pirq rules are nibble-based, like ALI, * but without the ugly irq number munging. * However, for 82C586, nibble map is different . 
*/ static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; WARN_ON_ONCE(pirq > 5); return read_config_nybble(router, 0x55, pirqmap[pirq-1]); } static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 }; WARN_ON_ONCE(pirq > 5); write_config_nybble(router, 0x55, pirqmap[pirq-1], irq); return 1; } /* * ITE 8330G pirq rules are nibble-based * FIXME: pirqmap may be { 1, 0, 3, 2 }, * 2+3 are both mapped to irq 9 on my system */ static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; WARN_ON_ONCE(pirq > 4); return read_config_nybble(router, 0x43, pirqmap[pirq-1]); } static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static const unsigned char pirqmap[4] = { 1, 0, 2, 3 }; WARN_ON_ONCE(pirq > 4); write_config_nybble(router, 0x43, pirqmap[pirq-1], irq); return 1; } /* * OPTI: high four bits are nibble pointer.. * I wonder what the low bits do? */ static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0xb8, pirq >> 4); } static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0xb8, pirq >> 4, irq); return 1; } /* * Cyrix: nibble offset 0x5C * 0x5C bits 7:4 is INTB bits 3:0 is INTA * 0x5D bits 7:4 is INTD bits 3:0 is INTC */ static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { return read_config_nybble(router, 0x5C, (pirq-1)^1); } static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { write_config_nybble(router, 0x5C, (pirq-1)^1, irq); return 1; } /* * PIRQ routing for SiS 85C503 router used in several SiS chipsets. 
* We have to deal with the following issues here: * - vendors have different ideas about the meaning of link values * - some onboard devices (integrated in the chipset) have special * links and are thus routed differently (i.e. not via PCI INTA-INTD) * - different revision of the router have a different layout for * the routing registers, particularly for the onchip devices * * For all routing registers the common thing is we have one byte * per routeable link which is defined as: * bit 7 IRQ mapping enabled (0) or disabled (1) * bits [6:4] reserved (sometimes used for onchip devices) * bits [3:0] IRQ to map to * allowed: 3-7, 9-12, 14-15 * reserved: 0, 1, 2, 8, 13 * * The config-space registers located at 0x41/0x42/0x43/0x44 are * always used to route the normal PCI INT A/B/C/D respectively. * Apparently there are systems implementing PCI routing table using * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D. * We try our best to handle both link mappings. * * Currently (2003-05-21) it appears most SiS chipsets follow the * definition of routing registers from the SiS-5595 southbridge. * According to the SiS 5595 datasheets the revision id's of the * router (ISA-bridge) should be 0x01 or 0xb0. * * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1. * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets. * They seem to work with the current routing code. However there is * some concern because of the two USB-OHCI HCs (original SiS 5595 * had only one). YMMV. * * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1: * * 0x61: IDEIRQ: * bits [6:5] must be written 01 * bit 4 channel-select primary (0), secondary (1) * * 0x62: USBIRQ: * bit 6 OHCI function disabled (0), enabled (1) * * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved * * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved * * We support USBIRQ (in addition to INTA-INTD) and keep the * IDE, ACPI and DAQ routing untouched as set by the BIOS. 
* * Currently the only reported exception is the new SiS 65x chipset * which includes the SiS 69x southbridge. Here we have the 85C503 * router revision 0x04 and there are changes in the register layout * mostly related to the different USB HCs with USB 2.0 support. * * Onchip routing for router rev-id 0x04 (try-and-error observation) * * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs * bit 6-4 are probably unused, not like 5595 */ #define PIRQ_SIS_IRQ_MASK 0x0f #define PIRQ_SIS_IRQ_DISABLE 0x80 #define PIRQ_SIS_USB_ENABLE 0x40 static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; int reg; reg = pirq; if (reg >= 0x01 && reg <= 0x04) reg += 0x40; pci_read_config_byte(router, reg, &x); return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK); } static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { u8 x; int reg; reg = pirq; if (reg >= 0x01 && reg <= 0x04) reg += 0x40; pci_read_config_byte(router, reg, &x); x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE); x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE; pci_write_config_byte(router, reg, x); return 1; } /* * VLSI: nibble offset 0x74 - educated guess due to routing table and * config space of VLSI 82C534 PCI-bridge/router (1004:0102) * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6 * for the busbridge to the docking station. 
*/ static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } return read_config_nybble(router, 0x74, pirq-1); } static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { WARN_ON_ONCE(pirq >= 9); if (pirq > 8) { dev_info(&dev->dev, "VLSI router PIRQ escape (%d)\n", pirq); return 0; } write_config_nybble(router, 0x74, pirq-1, irq); return 1; } /* * ServerWorks: PCI interrupts mapped to system IRQ lines through Index * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect * register is a straight binary coding of desired PIC IRQ (low nibble). * * The 'link' value in the PIRQ table is already in the correct format * for the Index register. There are some special index values: * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1, * and 0x03 for SMBus. */ static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { outb(pirq, 0xc00); return inb(0xc01) & 0xf; } static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { outb(pirq, 0xc00); outb(irq, 0xc01); return 1; } /* Support for AMD756 PCI IRQ Routing * Jhon H. Caicedo <jhcaiced@osso.org.co> * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... 
(jhcaiced) * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced) * The AMD756 pirq rules are nibble-based * offset 0x56 0-3 PIRQA 4-7 PIRQB * offset 0x57 0-3 PIRQC 4-7 PIRQD */ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 irq; irq = 0; if (pirq <= 4) irq = read_config_nybble(router, 0x56, pirq - 1); dev_info(&dev->dev, "AMD756: dev [%04x:%04x], router PIRQ %d get IRQ %d\n", dev->vendor, dev->device, pirq, irq); return irq; } static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { dev_info(&dev->dev, "AMD756: dev [%04x:%04x], router PIRQ %d set IRQ %d\n", dev->vendor, dev->device, pirq, irq); if (pirq <= 4) write_config_nybble(router, 0x56, pirq - 1, irq); return 1; } /* * PicoPower PT86C523 */ static int pirq_pico_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { outb(0x10 + ((pirq - 1) >> 1), 0x24); return ((pirq - 1) & 1) ? (inb(0x26) >> 4) : (inb(0x26) & 0xf); } static int pirq_pico_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { unsigned int x; outb(0x10 + ((pirq - 1) >> 1), 0x24); x = inb(0x26); x = ((pirq - 1) & 1) ? 
((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | (irq)); outb(x, 0x26); return 1; } #ifdef CONFIG_PCI_BIOS static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { struct pci_dev *bridge; int pin = pci_get_interrupt_pin(dev, &bridge); return pcibios_set_irq_routing(bridge, pin - 1, irq); } #endif static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { static struct pci_device_id __initdata pirq_440gx[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) }, { }, }; /* 440GX has a proprietary PIRQ router -- don't use it */ if (pci_dev_present(pirq_440gx)) return 0; switch (device) { case PCI_DEVICE_ID_INTEL_82371FB_0: case PCI_DEVICE_ID_INTEL_82371SB_0: case PCI_DEVICE_ID_INTEL_82371AB_0: case PCI_DEVICE_ID_INTEL_82371MX: case PCI_DEVICE_ID_INTEL_82443MX_0: case PCI_DEVICE_ID_INTEL_82801AA_0: case PCI_DEVICE_ID_INTEL_82801AB_0: case PCI_DEVICE_ID_INTEL_82801BA_0: case PCI_DEVICE_ID_INTEL_82801BA_10: case PCI_DEVICE_ID_INTEL_82801CA_0: case PCI_DEVICE_ID_INTEL_82801CA_12: case PCI_DEVICE_ID_INTEL_82801DB_0: case PCI_DEVICE_ID_INTEL_82801E_0: case PCI_DEVICE_ID_INTEL_82801EB_0: case PCI_DEVICE_ID_INTEL_ESB_1: case PCI_DEVICE_ID_INTEL_ICH6_0: case PCI_DEVICE_ID_INTEL_ICH6_1: case PCI_DEVICE_ID_INTEL_ICH7_0: case PCI_DEVICE_ID_INTEL_ICH7_1: case PCI_DEVICE_ID_INTEL_ICH7_30: case PCI_DEVICE_ID_INTEL_ICH7_31: case PCI_DEVICE_ID_INTEL_TGP_LPC: case PCI_DEVICE_ID_INTEL_ESB2_0: case PCI_DEVICE_ID_INTEL_ICH8_0: case PCI_DEVICE_ID_INTEL_ICH8_1: case PCI_DEVICE_ID_INTEL_ICH8_2: case PCI_DEVICE_ID_INTEL_ICH8_3: case PCI_DEVICE_ID_INTEL_ICH8_4: case PCI_DEVICE_ID_INTEL_ICH9_0: case PCI_DEVICE_ID_INTEL_ICH9_1: case PCI_DEVICE_ID_INTEL_ICH9_2: case PCI_DEVICE_ID_INTEL_ICH9_3: case PCI_DEVICE_ID_INTEL_ICH9_4: case PCI_DEVICE_ID_INTEL_ICH9_5: case PCI_DEVICE_ID_INTEL_EP80579_0: case PCI_DEVICE_ID_INTEL_ICH10_0: case 
PCI_DEVICE_ID_INTEL_ICH10_1: case PCI_DEVICE_ID_INTEL_ICH10_2: case PCI_DEVICE_ID_INTEL_ICH10_3: case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0: case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1: r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX) || (device >= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN && device <= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX)) { r->name = "PIIX/ICH"; r->get = pirq_piix_get; r->set = pirq_piix_set; return 1; } return 0; } static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { /* FIXME: We should move some of the quirk fixup stuff here */ /* * workarounds for some buggy BIOSes */ if (device == PCI_DEVICE_ID_VIA_82C586_0) { switch (router->device) { case PCI_DEVICE_ID_VIA_82C686: /* * Asus k7m bios wrongly reports 82C686A * as 586-compatible */ device = PCI_DEVICE_ID_VIA_82C686; break; case PCI_DEVICE_ID_VIA_8235: /** * Asus a7v-x bios wrongly reports 8235 * as 586-compatible */ device = PCI_DEVICE_ID_VIA_8235; break; case PCI_DEVICE_ID_VIA_8237: /** * Asus a7v600 bios wrongly reports 8237 * as 586-compatible */ device = PCI_DEVICE_ID_VIA_8237; break; } } switch (device) { case PCI_DEVICE_ID_VIA_82C586_0: r->name = "VIA"; r->get = pirq_via586_get; r->set = pirq_via586_set; return 1; case PCI_DEVICE_ID_VIA_82C596: case PCI_DEVICE_ID_VIA_82C686: case PCI_DEVICE_ID_VIA_8231: case PCI_DEVICE_ID_VIA_8233A: case PCI_DEVICE_ID_VIA_8235: case PCI_DEVICE_ID_VIA_8237: /* FIXME: add new ones for 8233/5 */ r->name = "VIA"; r->get = pirq_via_get; r->set = pirq_via_set; return 1; } return 0; } static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 
device) { switch (device) { case PCI_DEVICE_ID_VLSI_82C534: r->name = "VLSI 82C534"; r->get = pirq_vlsi_get; r->set = pirq_vlsi_set; return 1; } return 0; } static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_SERVERWORKS_OSB4: case PCI_DEVICE_ID_SERVERWORKS_CSB5: r->name = "ServerWorks"; r->get = pirq_serverworks_get; r->set = pirq_serverworks_set; return 1; } return 0; } static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { if (device != PCI_DEVICE_ID_SI_503) return 0; r->name = "SIS"; r->get = pirq_sis_get; r->set = pirq_sis_set; return 1; } static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_CYRIX_5520: r->name = "NatSemi"; r->get = pirq_cyrix_get; r->set = pirq_cyrix_set; return 1; } return 0; } static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_OPTI_82C700: r->name = "OPTI"; r->get = pirq_opti_get; r->set = pirq_opti_set; return 1; } return 0; } static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_ITE_IT8330G_0: r->name = "ITE"; r->get = pirq_ite_get; r->set = pirq_ite_set; return 1; } return 0; } static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_AL_M1533: case PCI_DEVICE_ID_AL_M1563: r->name = "ALI"; r->get = pirq_ali_get; r->set = pirq_ali_set; return 1; } return 0; } static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_AMD_VIPER_740B: r->name = "AMD756"; break; case PCI_DEVICE_ID_AMD_VIPER_7413: r->name = "AMD766"; break; case PCI_DEVICE_ID_AMD_VIPER_7443: r->name = "AMD768"; break; default: return 0; } r->get = pirq_amd756_get; 
r->set = pirq_amd756_set; return 1; } static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) { switch (device) { case PCI_DEVICE_ID_PICOPOWER_PT86C523: r->name = "PicoPower PT86C523"; r->get = pirq_pico_get; r->set = pirq_pico_set; return 1; case PCI_DEVICE_ID_PICOPOWER_PT86C523BBP: r->name = "PicoPower PT86C523 rev. BB+"; r->get = pirq_pico_get; r->set = pirq_pico_set; return 1; } return 0; } static __initdata struct irq_router_handler pirq_routers[] = { { PCI_VENDOR_ID_INTEL, intel_router_probe }, { PCI_VENDOR_ID_AL, ali_router_probe }, { PCI_VENDOR_ID_ITE, ite_router_probe }, { PCI_VENDOR_ID_VIA, via_router_probe }, { PCI_VENDOR_ID_OPTI, opti_router_probe }, { PCI_VENDOR_ID_SI, sis_router_probe }, { PCI_VENDOR_ID_CYRIX, cyrix_router_probe }, { PCI_VENDOR_ID_VLSI, vlsi_router_probe }, { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe }, { PCI_VENDOR_ID_AMD, amd_router_probe }, { PCI_VENDOR_ID_PICOPOWER, pico_router_probe }, /* Someone with docs needs to add the ATI Radeon IGP */ { 0, NULL } }; static struct irq_router pirq_router; static struct pci_dev *pirq_router_dev; /* * FIXME: should we have an option to say "generic for * chipset" ? 
*/ static void __init pirq_find_router(struct irq_router *r) { struct irq_routing_table *rt = pirq_table; struct irq_router_handler *h; #ifdef CONFIG_PCI_BIOS if (!rt->signature) { printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n"); r->set = pirq_bios_set; r->name = "BIOS"; return; } #endif /* Default unless a driver reloads it */ r->name = "default"; r->get = NULL; r->set = NULL; DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for [%04x:%04x]\n", rt->rtr_vendor, rt->rtr_device); pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn); if (!pirq_router_dev) { DBG(KERN_DEBUG "PCI: Interrupt router not found at " "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn); return; } for (h = pirq_routers; h->vendor; h++) { /* First look for a router match */ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device)) break; /* Fall back to a device match */ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device)) break; } dev_info(&pirq_router_dev->dev, "%s IRQ router [%04x:%04x]\n", pirq_router.name, pirq_router_dev->vendor, pirq_router_dev->device); /* The device remains referenced for the kernel lifetime */ } static struct irq_info *pirq_get_info(struct pci_dev *dev) { struct irq_routing_table *rt = pirq_table; int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); struct irq_info *info; for (info = rt->slots; entries--; info++) if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn)) return info; return NULL; } static int pcibios_lookup_irq(struct pci_dev *dev, int assign) { u8 pin; struct irq_info *info; int i, pirq, newirq; int irq = 0; u32 mask; struct irq_router *r = &pirq_router; struct pci_dev *dev2 = NULL; char *msg = NULL; /* Find IRQ pin */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) { dev_dbg(&dev->dev, "no interrupt pin\n"); return 0; } if (io_apic_assign_pci_irqs) return 0; /* Find IRQ routing entry */ if 
(!pirq_table) return 0; info = pirq_get_info(dev); if (!info) { dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", 'A' + pin - 1); return 0; } pirq = info->irq[pin - 1].link; mask = info->irq[pin - 1].bitmap; if (!pirq) { dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1); return 0; } dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", 'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs); mask &= pcibios_irq_mask; /* Work around broken HP Pavilion Notebooks which assign USB to IRQ 9 even though it is actually wired to IRQ 11 */ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) { dev->irq = 11; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); r->set(pirq_router_dev, dev, pirq, 11); } /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) { pirq = 0x68; mask = 0x400; dev->irq = r->get(pirq_router_dev, dev, pirq); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } /* * Find the best IRQ to assign: use the one * reported by the device if possible. 
*/ newirq = dev->irq; if (newirq && !((1 << newirq) & mask)) { if (pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; else dev_warn(&dev->dev, "IRQ %d doesn't match PIRQ mask " "%#x; try pci=usepirqmask\n", newirq, mask); } if (!newirq && assign) { for (i = 0; i < 16; i++) { if (!(mask & (1 << i))) continue; if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED)) newirq = i; } } dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq); /* Check if it is hardcoded */ if ((pirq & 0xf0) == 0xf0) { irq = pirq & 0xf; msg = "hardcoded"; } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) { msg = "found"; eisa_set_level_irq(irq); } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { if (r->set(pirq_router_dev, dev, pirq, newirq)) { eisa_set_level_irq(newirq); msg = "assigned"; irq = newirq; } } if (!irq) { if (newirq && mask == (1 << newirq)) { msg = "guessed"; irq = newirq; } else { dev_dbg(&dev->dev, "can't route interrupt\n"); return 0; } } dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq); /* Update IRQ for all devices with the same pirq value */ for_each_pci_dev(dev2) { pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; info = pirq_get_info(dev2); if (!info) continue; if (info->irq[pin - 1].link == pirq) { /* * We refuse to override the dev->irq * information. Give a warning! 
*/ if (dev2->irq && dev2->irq != irq && \ (!(pci_probe & PCI_USE_PIRQ_MASK) || \ ((1 << dev2->irq) & mask))) { #ifndef CONFIG_PCI_MSI dev_info(&dev2->dev, "IRQ routing conflict: " "have IRQ %d, want IRQ %d\n", dev2->irq, irq); #endif continue; } dev2->irq = irq; pirq_penalty[irq]++; if (dev != dev2) dev_info(&dev->dev, "sharing IRQ %d with %s\n", irq, pci_name(dev2)); } } return 1; } void __init pcibios_fixup_irqs(void) { struct pci_dev *dev = NULL; u8 pin; DBG(KERN_DEBUG "PCI: IRQ fixup\n"); for_each_pci_dev(dev) { /* * If the BIOS has set an out of range IRQ number, just * ignore it. Also keep track of which IRQ's are * already in use. */ if (dev->irq >= 16) { dev_dbg(&dev->dev, "ignoring bogus IRQ %d\n", dev->irq); dev->irq = 0; } /* * If the IRQ is already assigned to a PCI device, * ignore its ISA use penalty */ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000) pirq_penalty[dev->irq] = 0; pirq_penalty[dev->irq]++; } if (io_apic_assign_pci_irqs) return; dev = NULL; for_each_pci_dev(dev) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; /* * Still no IRQ? Try to lookup one... 
*/ if (!dev->irq) pcibios_lookup_irq(dev, 0); } } /* * Work around broken HP Pavilion Notebooks which assign USB to * IRQ 9 even though it is actually wired to IRQ 11 */ static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d) { if (!broken_hp_bios_irq9) { broken_hp_bios_irq9 = 1; printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); } return 0; } /* * Work around broken Acer TravelMate 360 Notebooks which assign * Cardbus to IRQ 11 even though it is actually wired to IRQ 10 */ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d) { if (!acer_tm360_irqrouting) { acer_tm360_irqrouting = 1; printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); } return 0; } static struct dmi_system_id __initdata pciirq_dmi_table[] = { { .callback = fix_broken_hp_bios_irq9, .ident = "HP Pavilion N5400 Series Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"), DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"), DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), }, }, { .callback = fix_acer_tm360_irqrouting, .ident = "Acer TravelMate 36x Laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, { } }; void __init pcibios_irq_init(void) { DBG(KERN_DEBUG "PCI: IRQ init\n"); if (raw_pci_ops == NULL) return; dmi_check_system(pciirq_dmi_table); pirq_table = pirq_find_routing_table(); #ifdef CONFIG_PCI_BIOS if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) pirq_table = pcibios_get_irq_routing_table(); #endif if (pirq_table) { pirq_peer_trick(); pirq_find_router(&pirq_router); if (pirq_table->exclusive_irqs) { int i; for (i = 0; i < 16; i++) if (!(pirq_table->exclusive_irqs & (1 << i))) pirq_penalty[i] += 100; } /* * If we're using the I/O APIC, avoid using the PCI IRQ * routing table */ if (io_apic_assign_pci_irqs) pirq_table = NULL; } x86_init.pci.fixup_irqs(); if (io_apic_assign_pci_irqs && 
pci_routeirq) { struct pci_dev *dev = NULL; /* * PCI IRQ routing is set up by pci_enable_device(), but we * also do it here in case there are still broken drivers that * don't use pci_enable_device(). */ printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n"); for_each_pci_dev(dev) pirq_enable_irq(dev); } } static void pirq_penalize_isa_irq(int irq, int active) { /* * If any ISAPnP device reports an IRQ in its list of possible * IRQ's, we try to avoid assigning it to PCI devices. */ if (irq < 16) { if (active) pirq_penalty[irq] += 1000; else pirq_penalty[irq] += 100; } } void pcibios_penalize_isa_irq(int irq, int active) { #ifdef CONFIG_ACPI if (!acpi_noirq) acpi_penalize_isa_irq(irq, active); else #endif pirq_penalize_isa_irq(irq, active); } static int pirq_enable_irq(struct pci_dev *dev) { u8 pin; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin && !pcibios_lookup_irq(dev, 1)) { char *msg = ""; if (!io_apic_assign_pci_irqs && dev->irq) return 0; if (io_apic_assign_pci_irqs) { #ifdef CONFIG_X86_IO_APIC struct pci_dev *temp_dev; int irq; struct io_apic_irq_attr irq_attr; irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1, &irq_attr); /* * Busses behind bridges are typically not listed in the MP-table. * In this case we have to look up the IRQ based on the parent bus, * parent slot, and pin number. The SMP code detects such bridged * busses itself so we should get into this branch reliably. 
*/ temp_dev = dev; while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ struct pci_dev *bridge = dev->bus->self; pin = pci_swizzle_interrupt_pin(dev, pin); irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, PCI_SLOT(bridge->devfn), pin - 1, &irq_attr); if (irq >= 0) dev_warn(&dev->dev, "using bridge %s " "INT %c to get IRQ %d\n", pci_name(bridge), 'A' + pin - 1, irq); dev = bridge; } dev = temp_dev; if (irq >= 0) { io_apic_set_pci_routing(&dev->dev, irq, &irq_attr); dev->irq = irq; dev_info(&dev->dev, "PCI->APIC IRQ transform: " "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); return 0; } else msg = "; probably buggy MP table"; #endif } else if (pci_probe & PCI_BIOS_IRQ_SCAN) msg = ""; else msg = "; please try using pci=biosirq"; /* * With IDE legacy devices the IRQ lookup failure is not * a problem.. */ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5)) return 0; dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", 'A' + pin - 1, msg); } return 0; }
gpl-2.0
gproj-m/lge-kernel-gproj
fs/nls/nls_euc-jp.c
12531
24438
/* * linux/fs/nls/nls_euc-jp.c * * Added `OSF/JVC Recommended Code Set Conversion Specification * between Japanese EUC and Shift-JIS' support: <hirofumi@mail.parknet.co.jp> * (http://www.opengroup.or.jp/jvc/cde/sjis-euc-e.html) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static struct nls_table *p_nls; #define IS_SJIS_LOW_BYTE(l) ((0x40 <= (l)) && ((l) <= 0xFC) && ((l) != 0x7F)) /* JIS X 0208 (include NEC spesial characters) */ #define IS_SJIS_JISX0208(h, l) ((((0x81 <= (h)) && ((h) <= 0x9F)) \ || ((0xE0 <= (h)) && ((h) <= 0xEA))) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_JISX0201KANA(c) ((0xA1 <= (c)) && ((c) <= 0xDF)) #define IS_SJIS_UDC_LOW(h, l) (((0xF0 <= (h)) && ((h) <= 0xF4)) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_UDC_HI(h, l) (((0xF5 <= (h)) && ((h) <= 0xF9)) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_IBM(h, l) (((0xFA <= (h)) && ((h) <= 0xFC)) \ && IS_SJIS_LOW_BYTE(l)) #define IS_SJIS_NECIBM(h, l) (((0xED <= (h)) && ((h) <= 0xEE)) \ && IS_SJIS_LOW_BYTE(l)) #define MAP_SJIS2EUC(sjis_hi, sjis_lo, sjis_p, euc_hi, euc_lo, euc_p) { \ if ((sjis_lo) >= 0x9F) { \ (euc_hi) = (sjis_hi) * 2 - (((sjis_p) * 2 - (euc_p)) - 1); \ (euc_lo) = (sjis_lo) + 2; \ } else { \ (euc_hi) = (sjis_hi) * 2 - ((sjis_p) * 2 - (euc_p)); \ (euc_lo) = (sjis_lo) + ((sjis_lo) >= 0x7F ? 
0x60 : 0x61); \ } \ } while(0) #define SS2 (0x8E) /* Single Shift 2 */ #define SS3 (0x8F) /* Single Shift 3 */ #define IS_EUC_BYTE(c) ((0xA1 <= (c)) && ((c) <= 0xFE)) #define IS_EUC_JISX0208(h, l) (IS_EUC_BYTE(h) && IS_EUC_BYTE(l)) #define IS_EUC_JISX0201KANA(h, l) (((h) == SS2) && (0xA1 <= (l) && (l) <= 0xDF)) #define IS_EUC_UDC_LOW(h, l) (((0xF5 <= (h)) && ((h) <= 0xFE)) \ && IS_EUC_BYTE(l)) #define IS_EUC_UDC_HI(h, l) IS_EUC_UDC_LOW(h, l) /* G3 block */ #define MAP_EUC2SJIS(euc_hi, euc_lo, euc_p, sjis_hi, sjis_lo, sjis_p) { \ if ((euc_hi) & 1) { \ (sjis_hi) = (euc_hi) / 2 + ((sjis_p) - (euc_p) / 2); \ (sjis_lo) = (euc_lo) - ((euc_lo) >= 0xE0 ? 0x60 : 0x61); \ } else { \ (sjis_hi) = (euc_hi) / 2 + (((sjis_p) - (euc_p) / 2) - 1); \ (sjis_lo) = (euc_lo) - 2; \ } \ } while(0) /* SJIS IBM extended characters to EUC map */ static const unsigned char sjisibm2euc_map[][2] = { {0xF3, 0xF3}, {0xF3, 0xF4}, {0xF3, 0xF5}, {0xF3, 0xF6}, {0xF3, 0xF7}, {0xF3, 0xF8}, {0xF3, 0xF9}, {0xF3, 0xFA}, {0xF3, 0xFB}, {0xF3, 0xFC}, {0xF3, 0xFD}, {0xF3, 0xFE}, {0xF4, 0xA1}, {0xF4, 0xA2}, {0xF4, 0xA3}, {0xF4, 0xA4}, {0xF4, 0xA5}, {0xF4, 0xA6}, {0xF4, 0xA7}, {0xF4, 0xA8}, {0xA2, 0xCC}, {0xA2, 0xC3}, {0xF4, 0xA9}, {0xF4, 0xAA}, {0xF4, 0xAB}, {0xF4, 0xAC}, {0xF4, 0xAD}, {0xA2, 0xE8}, {0xD4, 0xE3}, {0xDC, 0xDF}, {0xE4, 0xE9}, {0xE3, 0xF8}, {0xD9, 0xA1}, {0xB1, 0xBB}, {0xF4, 0xAE}, {0xC2, 0xAD}, {0xC3, 0xFC}, {0xE4, 0xD0}, {0xC2, 0xBF}, {0xBC, 0xF4}, {0xB0, 0xA9}, {0xB0, 0xC8}, {0xF4, 0xAF}, {0xB0, 0xD2}, {0xB0, 0xD4}, {0xB0, 0xE3}, {0xB0, 0xEE}, {0xB1, 0xA7}, {0xB1, 0xA3}, {0xB1, 0xAC}, {0xB1, 0xA9}, {0xB1, 0xBE}, {0xB1, 0xDF}, {0xB1, 0xD8}, {0xB1, 0xC8}, {0xB1, 0xD7}, {0xB1, 0xE3}, {0xB1, 0xF4}, {0xB1, 0xE1}, {0xB2, 0xA3}, {0xF4, 0xB0}, {0xB2, 0xBB}, {0xB2, 0xE6}, {0x00, 0x00}, {0xB2, 0xED}, {0xB2, 0xF5}, {0xB2, 0xFC}, {0xF4, 0xB1}, {0xB3, 0xB5}, {0xB3, 0xD8}, {0xB3, 0xDB}, {0xB3, 0xE5}, {0xB3, 0xEE}, {0xB3, 0xFB}, {0xF4, 0xB2}, {0xF4, 0xB3}, {0xB4, 0xC0}, {0xB4, 0xC7}, {0xB4, 0xD0}, {0xB4, 
0xDE}, {0xF4, 0xB4}, {0xB5, 0xAA}, {0xF4, 0xB5}, {0xB5, 0xAF}, {0xB5, 0xC4}, {0xB5, 0xE8}, {0xF4, 0xB6}, {0xB7, 0xC2}, {0xB7, 0xE4}, {0xB7, 0xE8}, {0xB7, 0xE7}, {0xF4, 0xB7}, {0xF4, 0xB8}, {0xF4, 0xB9}, {0xB8, 0xCE}, {0xB8, 0xE1}, {0xB8, 0xF5}, {0xB8, 0xF7}, {0xB8, 0xF8}, {0xB8, 0xFC}, {0xB9, 0xAF}, {0xB9, 0xB7}, {0xBA, 0xBE}, {0xBA, 0xDB}, {0xCD, 0xAA}, {0xBA, 0xE1}, {0xF4, 0xBA}, {0xBA, 0xEB}, {0xBB, 0xB3}, {0xBB, 0xB8}, {0xF4, 0xBB}, {0xBB, 0xCA}, {0xF4, 0xBC}, {0xF4, 0xBD}, {0xBB, 0xD0}, {0xBB, 0xDE}, {0xBB, 0xF4}, {0xBB, 0xF5}, {0xBB, 0xF9}, {0xBC, 0xE4}, {0xBC, 0xED}, {0xBC, 0xFE}, {0xF4, 0xBE}, {0xBD, 0xC2}, {0xBD, 0xE7}, {0xF4, 0xBF}, {0xBD, 0xF0}, {0xBE, 0xB0}, {0xBE, 0xAC}, {0xF4, 0xC0}, {0xBE, 0xB3}, {0xBE, 0xBD}, {0xBE, 0xCD}, {0xBE, 0xC9}, {0xBE, 0xE4}, {0xBF, 0xA8}, {0xBF, 0xC9}, {0xC0, 0xC4}, {0xC0, 0xE4}, {0xC0, 0xF4}, {0xC1, 0xA6}, {0xF4, 0xC1}, {0xC1, 0xF5}, {0xC1, 0xFC}, {0xF4, 0xC2}, {0xC1, 0xF8}, {0xC2, 0xAB}, {0xC2, 0xA1}, {0xC2, 0xA5}, {0xF4, 0xC3}, {0xC2, 0xB8}, {0xC2, 0xBA}, {0xF4, 0xC4}, {0xC2, 0xC4}, {0xC2, 0xD2}, {0xC2, 0xD7}, {0xC2, 0xDB}, {0xC2, 0xDE}, {0xC2, 0xED}, {0xC2, 0xF0}, {0xF4, 0xC5}, {0xC3, 0xA1}, {0xC3, 0xB5}, {0xC3, 0xC9}, {0xC3, 0xB9}, {0xF4, 0xC6}, {0xC3, 0xD8}, {0xC3, 0xFE}, {0xF4, 0xC7}, {0xC4, 0xCC}, {0xF4, 0xC8}, {0xC4, 0xD9}, {0xC4, 0xEA}, {0xC4, 0xFD}, {0xF4, 0xC9}, {0xC5, 0xA7}, {0xC5, 0xB5}, {0xC5, 0xB6}, {0xF4, 0xCA}, {0xC5, 0xD5}, {0xC6, 0xB8}, {0xC6, 0xD7}, {0xC6, 0xE0}, {0xC6, 0xEA}, {0xC6, 0xE3}, {0xC7, 0xA1}, {0xC7, 0xAB}, {0xC7, 0xC7}, {0xC7, 0xC3}, {0xC7, 0xCB}, {0xC7, 0xCF}, {0xC7, 0xD9}, {0xF4, 0xCB}, {0xF4, 0xCC}, {0xC7, 0xE6}, {0xC7, 0xEE}, {0xC7, 0xFC}, {0xC7, 0xEB}, {0xC7, 0xF0}, {0xC8, 0xB1}, {0xC8, 0xE5}, {0xC8, 0xF8}, {0xC9, 0xA6}, {0xC9, 0xAB}, {0xC9, 0xAD}, {0xF4, 0xCD}, {0xC9, 0xCA}, {0xC9, 0xD3}, {0xC9, 0xE9}, {0xC9, 0xE3}, {0xC9, 0xFC}, {0xC9, 0xF4}, {0xC9, 0xF5}, {0xF4, 0xCE}, {0xCA, 0xB3}, {0xCA, 0xBD}, {0xCA, 0xEF}, {0xCA, 0xF1}, {0xCB, 0xAE}, {0xF4, 0xCF}, {0xCB, 0xCA}, {0xCB, 0xE6}, 
{0xCB, 0xEA}, {0xCB, 0xF0}, {0xCB, 0xF4}, {0xCB, 0xEE}, {0xCC, 0xA5}, {0xCB, 0xF9}, {0xCC, 0xAB}, {0xCC, 0xAE}, {0xCC, 0xAD}, {0xCC, 0xB2}, {0xCC, 0xC2}, {0xCC, 0xD0}, {0xCC, 0xD9}, {0xF4, 0xD0}, {0xCD, 0xBB}, {0xF4, 0xD1}, {0xCE, 0xBB}, {0xF4, 0xD2}, {0xCE, 0xBA}, {0xCE, 0xC3}, {0xF4, 0xD3}, {0xCE, 0xF2}, {0xB3, 0xDD}, {0xCF, 0xD5}, {0xCF, 0xE2}, {0xCF, 0xE9}, {0xCF, 0xED}, {0xF4, 0xD4}, {0xF4, 0xD5}, {0xF4, 0xD6}, {0x00, 0x00}, {0xF4, 0xD7}, {0xD0, 0xE5}, {0xF4, 0xD8}, {0xD0, 0xE9}, {0xD1, 0xE8}, {0xF4, 0xD9}, {0xF4, 0xDA}, {0xD1, 0xEC}, {0xD2, 0xBB}, {0xF4, 0xDB}, {0xD3, 0xE1}, {0xD3, 0xE8}, {0xD4, 0xA7}, {0xF4, 0xDC}, {0xF4, 0xDD}, {0xD4, 0xD4}, {0xD4, 0xF2}, {0xD5, 0xAE}, {0xF4, 0xDE}, {0xD7, 0xDE}, {0xF4, 0xDF}, {0xD8, 0xA2}, {0xD8, 0xB7}, {0xD8, 0xC1}, {0xD8, 0xD1}, {0xD8, 0xF4}, {0xD9, 0xC6}, {0xD9, 0xC8}, {0xD9, 0xD1}, {0xF4, 0xE0}, {0xF4, 0xE1}, {0xF4, 0xE2}, {0xF4, 0xE3}, {0xF4, 0xE4}, {0xDC, 0xD3}, {0xDD, 0xC8}, {0xDD, 0xD4}, {0xDD, 0xEA}, {0xDD, 0xFA}, {0xDE, 0xA4}, {0xDE, 0xB0}, {0xF4, 0xE5}, {0xDE, 0xB5}, {0xDE, 0xCB}, {0xF4, 0xE6}, {0xDF, 0xB9}, {0xF4, 0xE7}, {0xDF, 0xC3}, {0xF4, 0xE8}, {0xF4, 0xE9}, {0xE0, 0xD9}, {0xF4, 0xEA}, {0xF4, 0xEB}, {0xE1, 0xE2}, {0xF4, 0xEC}, {0xF4, 0xED}, {0xF4, 0xEE}, {0xE2, 0xC7}, {0xE3, 0xA8}, {0xE3, 0xA6}, {0xE3, 0xA9}, {0xE3, 0xAF}, {0xE3, 0xB0}, {0xE3, 0xAA}, {0xE3, 0xAB}, {0xE3, 0xBC}, {0xE3, 0xC1}, {0xE3, 0xBF}, {0xE3, 0xD5}, {0xE3, 0xD8}, {0xE3, 0xD6}, {0xE3, 0xDF}, {0xE3, 0xE3}, {0xE3, 0xE1}, {0xE3, 0xD4}, {0xE3, 0xE9}, {0xE4, 0xA6}, {0xE3, 0xF1}, {0xE3, 0xF2}, {0xE4, 0xCB}, {0xE4, 0xC1}, {0xE4, 0xC3}, {0xE4, 0xBE}, {0xF4, 0xEF}, {0xE4, 0xC0}, {0xE4, 0xC7}, {0xE4, 0xBF}, {0xE4, 0xE0}, {0xE4, 0xDE}, {0xE4, 0xD1}, {0xF4, 0xF0}, {0xE4, 0xDC}, {0xE4, 0xD2}, {0xE4, 0xDB}, {0xE4, 0xD4}, {0xE4, 0xFA}, {0xE4, 0xEF}, {0xE5, 0xB3}, {0xE5, 0xBF}, {0xE5, 0xC9}, {0xE5, 0xD0}, {0xE5, 0xE2}, {0xE5, 0xEA}, {0xE5, 0xEB}, {0xF4, 0xF1}, {0xF4, 0xF2}, {0xF4, 0xF3}, {0xE6, 0xE8}, {0xE6, 0xEF}, {0xE7, 0xAC}, {0xF4, 0xF4}, {0xE7, 
0xAE}, {0xF4, 0xF5}, {0xE7, 0xB1}, {0xF4, 0xF6}, {0xE7, 0xB2}, {0xE8, 0xB1}, {0xE8, 0xB6}, {0xF4, 0xF7}, {0xF4, 0xF8}, {0xE8, 0xDD}, {0xF4, 0xF9}, {0xF4, 0xFA}, {0xE9, 0xD1}, {0xF4, 0xFB}, {0xE9, 0xED}, {0xEA, 0xCD}, {0xF4, 0xFC}, {0xEA, 0xDB}, {0xEA, 0xE6}, {0xEA, 0xEA}, {0xEB, 0xA5}, {0xEB, 0xFB}, {0xEB, 0xFA}, {0xF4, 0xFD}, {0xEC, 0xD6}, {0xF4, 0xFE}, }; #define IS_EUC_IBM2JISX0208(h, l) \ (((h) == 0xA2 && (l) == 0xCC) || ((h) == 0xA2 && (l) == 0xE8)) /* EUC to SJIS IBM extended characters map (G3 JIS X 0212 block) */ static struct { unsigned short euc; unsigned char sjis[2]; } euc2sjisibm_jisx0212_map[] = { {0xA2C3, {0xFA, 0x55}}, {0xB0A9, {0xFA, 0x68}}, {0xB0C8, {0xFA, 0x69}}, {0xB0D2, {0xFA, 0x6B}}, {0xB0D4, {0xFA, 0x6C}}, {0xB0E3, {0xFA, 0x6D}}, {0xB0EE, {0xFA, 0x6E}}, {0xB1A3, {0xFA, 0x70}}, {0xB1A7, {0xFA, 0x6F}}, {0xB1A9, {0xFA, 0x72}}, {0xB1AC, {0xFA, 0x71}}, {0xB1BB, {0xFA, 0x61}}, {0xB1BE, {0xFA, 0x73}}, {0xB1C8, {0xFA, 0x76}}, {0xB1D7, {0xFA, 0x77}}, {0xB1D8, {0xFA, 0x75}}, {0xB1DF, {0xFA, 0x74}}, {0xB1E1, {0xFA, 0x7A}}, {0xB1E3, {0xFA, 0x78}}, {0xB1F4, {0xFA, 0x79}}, {0xB2A3, {0xFA, 0x7B}}, {0xB2BB, {0xFA, 0x7D}}, {0xB2E6, {0xFA, 0x7E}}, {0xB2ED, {0xFA, 0x80}}, {0xB2F5, {0xFA, 0x81}}, {0xB2FC, {0xFA, 0x82}}, {0xB3B5, {0xFA, 0x84}}, {0xB3D8, {0xFA, 0x85}}, {0xB3DB, {0xFA, 0x86}}, {0xB3DD, {0xFB, 0x77}}, {0xB3E5, {0xFA, 0x87}}, {0xB3EE, {0xFA, 0x88}}, {0xB3FB, {0xFA, 0x89}}, {0xB4C0, {0xFA, 0x8C}}, {0xB4C7, {0xFA, 0x8D}}, {0xB4D0, {0xFA, 0x8E}}, {0xB4DE, {0xFA, 0x8F}}, {0xB5AA, {0xFA, 0x91}}, {0xB5AF, {0xFA, 0x93}}, {0xB5C4, {0xFA, 0x94}}, {0xB5E8, {0xFA, 0x95}}, {0xB7C2, {0xFA, 0x97}}, {0xB7E4, {0xFA, 0x98}}, {0xB7E7, {0xFA, 0x9A}}, {0xB7E8, {0xFA, 0x99}}, {0xB8CE, {0xFA, 0x9E}}, {0xB8E1, {0xFA, 0x9F}}, {0xB8F5, {0xFA, 0xA0}}, {0xB8F7, {0xFA, 0xA1}}, {0xB8F8, {0xFA, 0xA2}}, {0xB8FC, {0xFA, 0xA3}}, {0xB9AF, {0xFA, 0xA4}}, {0xB9B7, {0xFA, 0xA5}}, {0xBABE, {0xFA, 0xA6}}, {0xBADB, {0xFA, 0xA7}}, {0xBAE1, {0xFA, 0xA9}}, {0xBAEB, {0xFA, 0xAB}}, {0xBBB3, 
{0xFA, 0xAC}}, {0xBBB8, {0xFA, 0xAD}}, {0xBBCA, {0xFA, 0xAF}}, {0xBBD0, {0xFA, 0xB2}}, {0xBBDE, {0xFA, 0xB3}}, {0xBBF4, {0xFA, 0xB4}}, {0xBBF5, {0xFA, 0xB5}}, {0xBBF9, {0xFA, 0xB6}}, {0xBCE4, {0xFA, 0xB7}}, {0xBCED, {0xFA, 0xB8}}, {0xBCF4, {0xFA, 0x67}}, {0xBCFE, {0xFA, 0xB9}}, {0xBDC2, {0xFA, 0xBB}}, {0xBDE7, {0xFA, 0xBC}}, {0xBDF0, {0xFA, 0xBE}}, {0xBEAC, {0xFA, 0xC0}}, {0xBEB0, {0xFA, 0xBF}}, {0xBEB3, {0xFA, 0xC2}}, {0xBEBD, {0xFA, 0xC3}}, {0xBEC9, {0xFA, 0xC5}}, {0xBECD, {0xFA, 0xC4}}, {0xBEE4, {0xFA, 0xC6}}, {0xBFA8, {0xFA, 0xC7}}, {0xBFC9, {0xFA, 0xC8}}, {0xC0C4, {0xFA, 0xC9}}, {0xC0E4, {0xFA, 0xCA}}, {0xC0F4, {0xFA, 0xCB}}, {0xC1A6, {0xFA, 0xCC}}, {0xC1F5, {0xFA, 0xCE}}, {0xC1F8, {0xFA, 0xD1}}, {0xC1FC, {0xFA, 0xCF}}, {0xC2A1, {0xFA, 0xD3}}, {0xC2A5, {0xFA, 0xD4}}, {0xC2AB, {0xFA, 0xD2}}, {0xC2AD, {0xFA, 0x63}}, {0xC2B8, {0xFA, 0xD6}}, {0xC2BA, {0xFA, 0xD7}}, {0xC2BF, {0xFA, 0x66}}, {0xC2C4, {0xFA, 0xD9}}, {0xC2D2, {0xFA, 0xDA}}, {0xC2D7, {0xFA, 0xDB}}, {0xC2DB, {0xFA, 0xDC}}, {0xC2DE, {0xFA, 0xDD}}, {0xC2ED, {0xFA, 0xDE}}, {0xC2F0, {0xFA, 0xDF}}, {0xC3A1, {0xFA, 0xE1}}, {0xC3B5, {0xFA, 0xE2}}, {0xC3B9, {0xFA, 0xE4}}, {0xC3C9, {0xFA, 0xE3}}, {0xC3D8, {0xFA, 0xE6}}, {0xC3FC, {0xFA, 0x64}}, {0xC3FE, {0xFA, 0xE7}}, {0xC4CC, {0xFA, 0xE9}}, {0xC4D9, {0xFA, 0xEB}}, {0xC4EA, {0xFA, 0xEC}}, {0xC4FD, {0xFA, 0xED}}, {0xC5A7, {0xFA, 0xEF}}, {0xC5B5, {0xFA, 0xF0}}, {0xC5B6, {0xFA, 0xF1}}, {0xC5D5, {0xFA, 0xF3}}, {0xC6B8, {0xFA, 0xF4}}, {0xC6D7, {0xFA, 0xF5}}, {0xC6E0, {0xFA, 0xF6}}, {0xC6E3, {0xFA, 0xF8}}, {0xC6EA, {0xFA, 0xF7}}, {0xC7A1, {0xFA, 0xF9}}, {0xC7AB, {0xFA, 0xFA}}, {0xC7C3, {0xFA, 0xFC}}, {0xC7C7, {0xFA, 0xFB}}, {0xC7CB, {0xFB, 0x40}}, {0xC7CF, {0xFB, 0x41}}, {0xC7D9, {0xFB, 0x42}}, {0xC7E6, {0xFB, 0x45}}, {0xC7EB, {0xFB, 0x48}}, {0xC7EE, {0xFB, 0x46}}, {0xC7F0, {0xFB, 0x49}}, {0xC7FC, {0xFB, 0x47}}, {0xC8B1, {0xFB, 0x4A}}, {0xC8E5, {0xFB, 0x4B}}, {0xC8F8, {0xFB, 0x4C}}, {0xC9A6, {0xFB, 0x4D}}, {0xC9AB, {0xFB, 0x4E}}, {0xC9AD, {0xFB, 0x4F}}, {0xC9CA, {0xFB, 
0x51}}, {0xC9D3, {0xFB, 0x52}}, {0xC9E3, {0xFB, 0x54}}, {0xC9E9, {0xFB, 0x53}}, {0xC9F4, {0xFB, 0x56}}, {0xC9F5, {0xFB, 0x57}}, {0xC9FC, {0xFB, 0x55}}, {0xCAB3, {0xFB, 0x59}}, {0xCABD, {0xFB, 0x5A}}, {0xCAEF, {0xFB, 0x5B}}, {0xCAF1, {0xFB, 0x5C}}, {0xCBAE, {0xFB, 0x5D}}, {0xCBCA, {0xFB, 0x5F}}, {0xCBE6, {0xFB, 0x60}}, {0xCBEA, {0xFB, 0x61}}, {0xCBEE, {0xFB, 0x64}}, {0xCBF0, {0xFB, 0x62}}, {0xCBF4, {0xFB, 0x63}}, {0xCBF9, {0xFB, 0x66}}, {0xCCA5, {0xFB, 0x65}}, {0xCCAB, {0xFB, 0x67}}, {0xCCAD, {0xFB, 0x69}}, {0xCCAE, {0xFB, 0x68}}, {0xCCB2, {0xFB, 0x6A}}, {0xCCC2, {0xFB, 0x6B}}, {0xCCD0, {0xFB, 0x6C}}, {0xCCD9, {0xFB, 0x6D}}, {0xCDAA, {0xFA, 0xA8}}, {0xCDBB, {0xFB, 0x6F}}, {0xCEBA, {0xFB, 0x73}}, {0xCEBB, {0xFB, 0x71}}, {0xCEC3, {0xFB, 0x74}}, {0xCEF2, {0xFB, 0x76}}, {0xCFD5, {0xFB, 0x78}}, {0xCFE2, {0xFB, 0x79}}, {0xCFE9, {0xFB, 0x7A}}, {0xCFED, {0xFB, 0x7B}}, {0xD0E5, {0xFB, 0x81}}, {0xD0E9, {0xFB, 0x83}}, {0xD1E8, {0xFB, 0x84}}, {0xD1EC, {0xFB, 0x87}}, {0xD2BB, {0xFB, 0x88}}, {0xD3E1, {0xFB, 0x8A}}, {0xD3E8, {0xFB, 0x8B}}, {0xD4A7, {0xFB, 0x8C}}, {0xD4D4, {0xFB, 0x8F}}, {0xD4E3, {0xFA, 0x5C}}, {0xD4F2, {0xFB, 0x90}}, {0xD5AE, {0xFB, 0x91}}, {0xD7DE, {0xFB, 0x93}}, {0xD8A2, {0xFB, 0x95}}, {0xD8B7, {0xFB, 0x96}}, {0xD8C1, {0xFB, 0x97}}, {0xD8D1, {0xFB, 0x98}}, {0xD8F4, {0xFB, 0x99}}, {0xD9A1, {0xFA, 0x60}}, {0xD9C6, {0xFB, 0x9A}}, {0xD9C8, {0xFB, 0x9B}}, {0xD9D1, {0xFB, 0x9C}}, {0xDCD3, {0xFB, 0xA2}}, {0xDCDF, {0xFA, 0x5D}}, {0xDDC8, {0xFB, 0xA3}}, {0xDDD4, {0xFB, 0xA4}}, {0xDDEA, {0xFB, 0xA5}}, {0xDDFA, {0xFB, 0xA6}}, {0xDEA4, {0xFB, 0xA7}}, {0xDEB0, {0xFB, 0xA8}}, {0xDEB5, {0xFB, 0xAA}}, {0xDECB, {0xFB, 0xAB}}, {0xDFB9, {0xFB, 0xAD}}, {0xDFC3, {0xFB, 0xAF}}, {0xE0D9, {0xFB, 0xB2}}, {0xE1E2, {0xFB, 0xB5}}, {0xE2C7, {0xFB, 0xB9}}, {0xE3A6, {0xFB, 0xBB}}, {0xE3A8, {0xFB, 0xBA}}, {0xE3A9, {0xFB, 0xBC}}, {0xE3AA, {0xFB, 0xBF}}, {0xE3AB, {0xFB, 0xC0}}, {0xE3AF, {0xFB, 0xBD}}, {0xE3B0, {0xFB, 0xBE}}, {0xE3BC, {0xFB, 0xC1}}, {0xE3BF, {0xFB, 0xC3}}, {0xE3C1, {0xFB, 0xC2}}, 
{0xE3D4, {0xFB, 0xCA}}, {0xE3D5, {0xFB, 0xC4}}, {0xE3D6, {0xFB, 0xC6}}, {0xE3D8, {0xFB, 0xC5}}, {0xE3DF, {0xFB, 0xC7}}, {0xE3E1, {0xFB, 0xC9}}, {0xE3E3, {0xFB, 0xC8}}, {0xE3E9, {0xFB, 0xCB}}, {0xE3F1, {0xFB, 0xCD}}, {0xE3F2, {0xFB, 0xCE}}, {0xE3F8, {0xFA, 0x5F}}, {0xE4A6, {0xFB, 0xCC}}, {0xE4BE, {0xFB, 0xD2}}, {0xE4BF, {0xFB, 0xD6}}, {0xE4C0, {0xFB, 0xD4}}, {0xE4C1, {0xFB, 0xD0}}, {0xE4C3, {0xFB, 0xD1}}, {0xE4C7, {0xFB, 0xD5}}, {0xE4CB, {0xFB, 0xCF}}, {0xE4D0, {0xFA, 0x65}}, {0xE4D1, {0xFB, 0xD9}}, {0xE4D2, {0xFB, 0xDC}}, {0xE4D4, {0xFB, 0xDE}}, {0xE4DB, {0xFB, 0xDD}}, {0xE4DC, {0xFB, 0xDB}}, {0xE4DE, {0xFB, 0xD8}}, {0xE4E0, {0xFB, 0xD7}}, {0xE4E9, {0xFA, 0x5E}}, {0xE4EF, {0xFB, 0xE0}}, {0xE4FA, {0xFB, 0xDF}}, {0xE5B3, {0xFB, 0xE1}}, {0xE5BF, {0xFB, 0xE2}}, {0xE5C9, {0xFB, 0xE3}}, {0xE5D0, {0xFB, 0xE4}}, {0xE5E2, {0xFB, 0xE5}}, {0xE5EA, {0xFB, 0xE6}}, {0xE5EB, {0xFB, 0xE7}}, {0xE6E8, {0xFB, 0xEB}}, {0xE6EF, {0xFB, 0xEC}}, {0xE7AC, {0xFB, 0xED}}, {0xE7AE, {0xFB, 0xEF}}, {0xE7B1, {0xFB, 0xF1}}, {0xE7B2, {0xFB, 0xF3}}, {0xE8B1, {0xFB, 0xF4}}, {0xE8B6, {0xFB, 0xF5}}, {0xE8DD, {0xFB, 0xF8}}, {0xE9D1, {0xFB, 0xFB}}, {0xE9ED, {0xFC, 0x40}}, {0xEACD, {0xFC, 0x41}}, {0xEADB, {0xFC, 0x43}}, {0xEAE6, {0xFC, 0x44}}, {0xEAEA, {0xFC, 0x45}}, {0xEBA5, {0xFC, 0x46}}, {0xEBFA, {0xFC, 0x48}}, {0xEBFB, {0xFC, 0x47}}, {0xECD6, {0xFC, 0x4A}}, }; /* EUC to SJIS IBM extended characters map (G3 Upper block) */ static const unsigned char euc2sjisibm_g3upper_map[][2] = { {0xFA, 0x40}, {0xFA, 0x41}, {0xFA, 0x42}, {0xFA, 0x43}, {0xFA, 0x44}, {0xFA, 0x45}, {0xFA, 0x46}, {0xFA, 0x47}, {0xFA, 0x48}, {0xFA, 0x49}, {0xFA, 0x4A}, {0xFA, 0x4B}, {0xFA, 0x4C}, {0xFA, 0x4D}, {0xFA, 0x4E}, {0xFA, 0x4F}, {0xFA, 0x50}, {0xFA, 0x51}, {0xFA, 0x52}, {0xFA, 0x53}, {0xFA, 0x56}, {0xFA, 0x57}, {0xFA, 0x58}, {0xFA, 0x59}, {0xFA, 0x5A}, {0xFA, 0x62}, {0xFA, 0x6A}, {0xFA, 0x7C}, {0xFA, 0x83}, {0xFA, 0x8A}, {0xFA, 0x8B}, {0xFA, 0x90}, {0xFA, 0x92}, {0xFA, 0x96}, {0xFA, 0x9B}, {0xFA, 0x9C}, {0xFA, 0x9D}, {0xFA, 
0xAA}, {0xFA, 0xAE}, {0xFA, 0xB0}, {0xFA, 0xB1}, {0xFA, 0xBA}, {0xFA, 0xBD}, {0xFA, 0xC1}, {0xFA, 0xCD}, {0xFA, 0xD0}, {0xFA, 0xD5}, {0xFA, 0xD8}, {0xFA, 0xE0}, {0xFA, 0xE5}, {0xFA, 0xE8}, {0xFA, 0xEA}, {0xFA, 0xEE}, {0xFA, 0xF2}, {0xFB, 0x43}, {0xFB, 0x44}, {0xFB, 0x50}, {0xFB, 0x58}, {0xFB, 0x5E}, {0xFB, 0x6E}, {0xFB, 0x70}, {0xFB, 0x72}, {0xFB, 0x75}, {0xFB, 0x7C}, {0xFB, 0x7D}, {0xFB, 0x7E}, {0xFB, 0x80}, {0xFB, 0x82}, {0xFB, 0x85}, {0xFB, 0x86}, {0xFB, 0x89}, {0xFB, 0x8D}, {0xFB, 0x8E}, {0xFB, 0x92}, {0xFB, 0x94}, {0xFB, 0x9D}, {0xFB, 0x9E}, {0xFB, 0x9F}, {0xFB, 0xA0}, {0xFB, 0xA1}, {0xFB, 0xA9}, {0xFB, 0xAC}, {0xFB, 0xAE}, {0xFB, 0xB0}, {0xFB, 0xB1}, {0xFB, 0xB3}, {0xFB, 0xB4}, {0xFB, 0xB6}, {0xFB, 0xB7}, {0xFB, 0xB8}, {0xFB, 0xD3}, {0xFB, 0xDA}, {0xFB, 0xE8}, {0xFB, 0xE9}, {0xFB, 0xEA}, {0xFB, 0xEE}, {0xFB, 0xF0}, {0xFB, 0xF2}, {0xFB, 0xF6}, {0xFB, 0xF7}, {0xFB, 0xF9}, {0xFB, 0xFA}, {0xFB, 0xFC}, {0xFC, 0x42}, {0xFC, 0x49}, {0xFC, 0x4B}, }; static inline int sjisibm2euc(unsigned char *euc, const unsigned char sjis_hi, const unsigned char sjis_lo); static inline int euc2sjisibm_jisx0212(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo); static inline int euc2sjisibm_g3upper(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo); static inline int euc2sjisibm(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo); static inline int sjisnec2sjisibm(unsigned char *sjisibm, const unsigned char sjisnec_hi, const unsigned char sjisnec_lo); /* SJIS IBM extended characters to EUC */ static inline int sjisibm2euc(unsigned char *euc, const unsigned char sjis_hi, const unsigned char sjis_lo) { int index; index = ((sjis_hi - 0xFA) * (0xFD - 0x40)) + (sjis_lo - 0x40); if (IS_EUC_IBM2JISX0208(sjisibm2euc_map[index][0], sjisibm2euc_map[index][1])) { euc[0] = sjisibm2euc_map[index][0]; euc[1] = sjisibm2euc_map[index][1]; return 2; } else { euc[0] = SS3; euc[1] = sjisibm2euc_map[index][0]; euc[2] = 
sjisibm2euc_map[index][1]; return 3; } } /* EUC to SJIS IBM extended characters (G3 JIS X 0212 block) */ static inline int euc2sjisibm_jisx0212(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo) { int index, min_index, max_index; unsigned short euc; min_index = 0; max_index = ARRAY_SIZE(euc2sjisibm_jisx0212_map) - 1; euc = (euc_hi << 8) | euc_lo; while (min_index <= max_index) { index = (min_index + max_index) / 2; if (euc < euc2sjisibm_jisx0212_map[index].euc) max_index = index - 1; else min_index = index + 1; if (euc == euc2sjisibm_jisx0212_map[index].euc) { sjis[0] = euc2sjisibm_jisx0212_map[index].sjis[0]; sjis[1] = euc2sjisibm_jisx0212_map[index].sjis[1]; return 3; } } return 0; } /* EUC to SJIS IBM extended characters (G3 Upper block) */ static inline int euc2sjisibm_g3upper(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo) { int index; if (euc_hi == 0xF3) index = ((euc_hi << 8) | euc_lo) - 0xF3F3; else index = ((euc_hi << 8) | euc_lo) - 0xF4A1 + 12; if ((index < 0) || (index >= ARRAY_SIZE(euc2sjisibm_g3upper_map))) return 0; sjis[0] = euc2sjisibm_g3upper_map[index][0]; sjis[1] = euc2sjisibm_g3upper_map[index][1]; return 3; } /* EUC to SJIS IBM extended characters (G3 block) */ static inline int euc2sjisibm(unsigned char *sjis, const unsigned char euc_hi, const unsigned char euc_lo) { int n; #if 0 if ((euc_hi == 0xA2) && (euc_lo == 0xCC)) { sjis[0] = 0xFA; sjis[1] = 0x54; return 2; } else if ((euc_hi == 0xA2) && (euc_lo == 0xE8)) { sjis[0] = 0xFA; sjis[1] = 0x5B; return 2; } #endif if ((n = euc2sjisibm_g3upper(sjis, euc_hi, euc_lo))) { return n; } else if ((n = euc2sjisibm_jisx0212(sjis, euc_hi, euc_lo))) { return n; } return 0; } /* NEC/IBM extended characters to IBM extended characters */ static inline int sjisnec2sjisibm(unsigned char *sjisibm, const unsigned char sjisnec_hi, const unsigned char sjisnec_lo) { int count; if (! 
IS_SJIS_NECIBM(sjisnec_hi, sjisnec_lo)) return 0; if ((sjisnec_hi == 0xEE) && (sjisnec_lo == 0xF9)) { sjisibm[0] = 0x81; sjisibm[1] = 0xCA; return 2; } if ((sjisnec_hi == 0xEE) && (sjisnec_lo >= 0xEF)) { count = (sjisnec_hi << 8 | sjisnec_lo) - (sjisnec_lo <= 0xF9 ? 0xEEEF : (0xEEEF - 10)); } else { count = (sjisnec_hi - 0xED) * (0xFC - 0x40) + (sjisnec_lo - 0x40) + (0x5C - 0x40); if (sjisnec_lo >= 0x7F) count--; } sjisibm[0] = 0xFA + (count / (0xFC - 0x40)); sjisibm[1] = 0x40 + (count % (0xFC - 0x40)); if (sjisibm[1] >= 0x7F) sjisibm[1]++; return 2; } static int uni2char(const wchar_t uni, unsigned char *out, int boundlen) { int n; if (!p_nls) return -EINVAL; if ((n = p_nls->uni2char(uni, out, boundlen)) < 0) return n; /* translate SJIS into EUC-JP */ if (n == 1) { if (IS_SJIS_JISX0201KANA(out[0])) { /* JIS X 0201 KANA */ if (boundlen < 2) return -ENAMETOOLONG; out[1] = out[0]; out[0] = SS2; return 2; } } else if (n == 2) { /* NEC/IBM extended characters to IBM extended characters */ sjisnec2sjisibm(out, out[0], out[1]); if (IS_SJIS_UDC_LOW(out[0], out[1])) { /* User defined characters half low */ MAP_SJIS2EUC(out[0], out[1], 0xF0, out[0], out[1], 0xF5); } else if (IS_SJIS_UDC_HI(out[0], out[1])) { /* User defined characters half high */ unsigned char ch, cl; if (boundlen < 3) return -ENAMETOOLONG; n = 3; ch = out[0]; cl = out[1]; out[0] = SS3; MAP_SJIS2EUC(ch, cl, 0xF5, out[1], out[2], 0xF5); } else if (IS_SJIS_IBM(out[0], out[1])) { /* IBM extended characters */ unsigned char euc[3], i; n = sjisibm2euc(euc, out[0], out[1]); if (boundlen < n) return -ENAMETOOLONG; for (i = 0; i < n; i++) out[i] = euc[i]; } else if (IS_SJIS_JISX0208(out[0], out[1])) { /* JIS X 0208 (include NEC special characters) */ out[0] = (out[0]^0xA0)*2 + 0x5F; if (out[1] > 0x9E) out[0]++; if (out[1] < 0x7F) out[1] = out[1] + 0x61; else if (out[1] < 0x9F) out[1] = out[1] + 0x60; else out[1] = out[1] + 0x02; } else { /* Invalid characters */ return -EINVAL; } } else return -EINVAL; return n; } 
/*
 * char2uni - decode one EUC-JP character from @rawstring into a Unicode
 * code point.
 *
 * The input is first transcoded from EUC-JP to Shift-JIS (sjis_temp) and
 * then handed to the underlying cp932 table (p_nls, loaded at module init)
 * for the actual SJIS -> Unicode mapping.
 *
 * Returns the number of input bytes consumed (1, 2 or 3) on success,
 * -EINVAL on an invalid sequence or a missing cp932 table, or
 * -ENAMETOOLONG when @boundlen is too small for the lead byte's sequence.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	unsigned char sjis_temp[2];
	int euc_offset, n;

	if ( !p_nls )
		return -EINVAL;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	/* translate EUC-JP into SJIS */
	if (rawstring[0] > 0x7F) {
		if (rawstring[0] == SS3) {
			/* Three-byte, SS3-prefixed (code set 3) sequence. */
			if (boundlen < 3)
				return -EINVAL;
			euc_offset = 3;
			if (IS_EUC_UDC_HI(rawstring[1], rawstring[2])) {
				/* User defined characters half high */
				MAP_EUC2SJIS(rawstring[1], rawstring[2], 0xF5,
					     sjis_temp[0], sjis_temp[1], 0xF5);
			} else if (euc2sjisibm(sjis_temp,rawstring[1],rawstring[2])) {
				/* IBM extended characters */
			} else {
				/* JIS X 0212 and Invalid characters*/
				return -EINVAL;

				/* 'GETA' with SJIS coding */
				/* sjis_temp[0] = 0x81; */
				/* sjis_temp[1] = 0xAC; */
			}
		} else {
			/* Two-byte sequence. */
			if (boundlen < 2)
				return -EINVAL;
			euc_offset = 2;
			if (IS_EUC_JISX0201KANA(rawstring[0], rawstring[1])) {
				/* JIS X 0201 KANA */
				sjis_temp[0] = rawstring[1];
				sjis_temp[1] = 0x00;
			} else if (IS_EUC_UDC_LOW(rawstring[0], rawstring[1])) {
				/* User defined characters half low */
				MAP_EUC2SJIS(rawstring[0], rawstring[1], 0xF5,
					     sjis_temp[0], sjis_temp[1], 0xF0);
			} else if (IS_EUC_JISX0208(rawstring[0], rawstring[1])) {
				/* JIS X 0208 (include NEC special characters) */
				sjis_temp[0] = ((rawstring[0]-0x5f)/2) ^ 0xA0;
				if (!(rawstring[0] & 1))
					sjis_temp[1] = rawstring[1] - 0x02;
				else if (rawstring[1] < 0xE0)
					sjis_temp[1] = rawstring[1] - 0x61;
				else
					sjis_temp[1] = rawstring[1] - 0x60;
			} else {
				/* Invalid characters */
				return -EINVAL;
			}
		}
	} else {
		euc_offset = 1;

		/* JIS X 0201 ROMAJI */
		sjis_temp[0] = rawstring[0];
		sjis_temp[1] = 0x00;
	}

	if ( (n = p_nls->char2uni(sjis_temp, sizeof(sjis_temp), uni)) < 0)
		return n;

	return euc_offset;
}

/* EUC-JP NLS operations; case tables are borrowed from cp932 at init. */
static struct nls_table table = {
	.charset	= "euc-jp",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.owner		= THIS_MODULE,
};

/*
 * Module init: this codec piggy-backs on the cp932 table for the
 * SJIS <-> Unicode step, so it can only register when cp932 loads.
 */
static int __init init_nls_euc_jp(void)
{
	p_nls = load_nls("cp932");

	if (p_nls) {
		/* Reuse the case-conversion tables of the base codec. */
		table.charset2upper = p_nls->charset2upper;
		table.charset2lower = p_nls->charset2lower;
		return register_nls(&table);
	}

	return -EINVAL;
}

/* Module exit: unregister our table and release the borrowed cp932 one. */
static void __exit exit_nls_euc_jp(void)
{
	unregister_nls(&table);
	unload_nls(p_nls);
}

module_init(init_nls_euc_jp)
module_exit(exit_nls_euc_jp)
MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
rperier/linux-rockchip
drivers/media/platform/vsp1/vsp1_lut.c
244
5861
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_lut.c -- R-Car VSP1 Look-Up Table * * Copyright (C) 2013 Renesas Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ #include <linux/device.h> #include <linux/gfp.h> #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_dl.h" #include "vsp1_lut.h" #define LUT_MIN_SIZE 4U #define LUT_MAX_SIZE 8190U #define LUT_SIZE 256 /* ----------------------------------------------------------------------------- * Device Access */ static inline void vsp1_lut_write(struct vsp1_lut *lut, struct vsp1_dl_body *dlb, u32 reg, u32 data) { vsp1_dl_body_write(dlb, reg, data); } /* ----------------------------------------------------------------------------- * Controls */ #define V4L2_CID_VSP1_LUT_TABLE (V4L2_CID_USER_BASE | 0x1001) static int lut_set_table(struct vsp1_lut *lut, struct v4l2_ctrl *ctrl) { struct vsp1_dl_body *dlb; unsigned int i; dlb = vsp1_dl_body_get(lut->pool); if (!dlb) return -ENOMEM; for (i = 0; i < LUT_SIZE; ++i) vsp1_dl_body_write(dlb, VI6_LUT_TABLE + 4 * i, ctrl->p_new.p_u32[i]); spin_lock_irq(&lut->lock); swap(lut->lut, dlb); spin_unlock_irq(&lut->lock); vsp1_dl_body_put(dlb); return 0; } static int lut_s_ctrl(struct v4l2_ctrl *ctrl) { struct vsp1_lut *lut = container_of(ctrl->handler, struct vsp1_lut, ctrls); switch (ctrl->id) { case V4L2_CID_VSP1_LUT_TABLE: lut_set_table(lut, ctrl); break; } return 0; } static const struct v4l2_ctrl_ops lut_ctrl_ops = { .s_ctrl = lut_s_ctrl, }; static const struct v4l2_ctrl_config lut_table_control = { .ops = &lut_ctrl_ops, .id = V4L2_CID_VSP1_LUT_TABLE, .name = "Look-Up Table", .type = V4L2_CTRL_TYPE_U32, .min = 0x00000000, .max = 0x00ffffff, .step = 1, .def = 0, .dims = { LUT_SIZE }, }; /* ----------------------------------------------------------------------------- * V4L2 Subdevice Pad Operations */ static const unsigned int lut_codes[] = { MEDIA_BUS_FMT_ARGB8888_1X32, MEDIA_BUS_FMT_AHSV8888_1X32, MEDIA_BUS_FMT_AYUV8_1X32, }; static int 
lut_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) { return vsp1_subdev_enum_mbus_code(subdev, cfg, code, lut_codes, ARRAY_SIZE(lut_codes)); } static int lut_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_frame_size_enum *fse) { return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LUT_MIN_SIZE, LUT_MIN_SIZE, LUT_MAX_SIZE, LUT_MAX_SIZE); } static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { return vsp1_subdev_set_pad_format(subdev, cfg, fmt, lut_codes, ARRAY_SIZE(lut_codes), LUT_MIN_SIZE, LUT_MIN_SIZE, LUT_MAX_SIZE, LUT_MAX_SIZE); } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Operations */ static const struct v4l2_subdev_pad_ops lut_pad_ops = { .init_cfg = vsp1_entity_init_cfg, .enum_mbus_code = lut_enum_mbus_code, .enum_frame_size = lut_enum_frame_size, .get_fmt = vsp1_subdev_get_pad_format, .set_fmt = lut_set_format, }; static const struct v4l2_subdev_ops lut_ops = { .pad = &lut_pad_ops, }; /* ----------------------------------------------------------------------------- * VSP1 Entity Operations */ static void lut_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_lut *lut = to_lut(&entity->subdev); vsp1_lut_write(lut, dlb, VI6_LUT_CTRL, VI6_LUT_CTRL_EN); } static void lut_configure_frame(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_lut *lut = to_lut(&entity->subdev); struct vsp1_dl_body *lut_dlb; unsigned long flags; spin_lock_irqsave(&lut->lock, flags); lut_dlb = lut->lut; lut->lut = NULL; spin_unlock_irqrestore(&lut->lock, flags); if (lut_dlb) { vsp1_dl_list_add_body(dl, lut_dlb); /* Release our local reference. 
*/ vsp1_dl_body_put(lut_dlb); } } static void lut_destroy(struct vsp1_entity *entity) { struct vsp1_lut *lut = to_lut(&entity->subdev); vsp1_dl_body_pool_destroy(lut->pool); } static const struct vsp1_entity_operations lut_entity_ops = { .configure_stream = lut_configure_stream, .configure_frame = lut_configure_frame, .destroy = lut_destroy, }; /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1) { struct vsp1_lut *lut; int ret; lut = devm_kzalloc(vsp1->dev, sizeof(*lut), GFP_KERNEL); if (lut == NULL) return ERR_PTR(-ENOMEM); spin_lock_init(&lut->lock); lut->entity.ops = &lut_entity_ops; lut->entity.type = VSP1_ENTITY_LUT; ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops, MEDIA_ENT_F_PROC_VIDEO_LUT); if (ret < 0) return ERR_PTR(ret); /* * Pre-allocate a body pool, with 3 bodies allowing a userspace update * before the hardware has committed a previous set of tables, handling * both the queued and pending dl entries. */ lut->pool = vsp1_dl_body_pool_create(vsp1, 3, LUT_SIZE, 0); if (!lut->pool) return ERR_PTR(-ENOMEM); /* Initialize the control handler. */ v4l2_ctrl_handler_init(&lut->ctrls, 1); v4l2_ctrl_new_custom(&lut->ctrls, &lut_table_control, NULL); lut->entity.subdev.ctrl_handler = &lut->ctrls; if (lut->ctrls.error) { dev_err(vsp1->dev, "lut: failed to initialize controls\n"); ret = lut->ctrls.error; vsp1_entity_destroy(&lut->entity); return ERR_PTR(ret); } v4l2_ctrl_handler_setup(&lut->ctrls); return lut; }
gpl-2.0
kannu1994/sgs2_kernel
drivers/gpu/drm/i915/intel_sdvo.c
244
80814
/* * Copyright 2006 Dave Airlie <airlied@linux.ie> * Copyright © 2006-2007 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "drm_edid.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1) #define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1) #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1) #define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0) #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\ SDVO_TV_MASK) #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) static const char *tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", "PAL_B" , "PAL_D" , "PAL_G" , "PAL_H" , "PAL_I" , "PAL_M" , "PAL_N" , "PAL_NC" , "PAL_60" , "SECAM_B" , "SECAM_D" , "SECAM_G" , "SECAM_K" , "SECAM_K1", "SECAM_L" , "SECAM_60" }; #define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) struct intel_sdvo { struct intel_encoder base; struct i2c_adapter *i2c; u8 slave_addr; struct i2c_adapter ddc; /* Register for the SDVO device: SDVOB or SDVOC */ int sdvo_reg; /* Active outputs controlled by this SDVO output */ uint16_t controlled_output; /* * Capabilities of the SDVO device returned by * i830_sdvo_get_capabilities() */ struct intel_sdvo_caps caps; /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; /* * For multiple function SDVO device, * this is for current attached outputs. */ uint16_t attached_output; /** * This is used to select the color range of RBG outputs in HDMI mode. * It is only valid when using TMDS encoding and 8 bit per color mode. 
*/ uint32_t color_range; /** * This is set if we're going to treat the device as TV-out. * * While we have these nice friendly flags for output types that ought * to decide this for us, the S-Video output on our HDMI+S-Video card * shows up as RGB1 (VGA). */ bool is_tv; /* This is for current tv format name */ int tv_format_index; /** * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; bool has_hdmi_monitor; bool has_hdmi_audio; /** * This is set if we detect output of sdvo device as LVDS and * have a valid fixed mode to use with the panel. */ bool is_lvds; /** * This is sdvo fixed pannel mode pointer */ struct drm_display_mode *sdvo_lvds_fixed_mode; /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; /* Input timings for adjusted_mode */ struct intel_sdvo_dtd input_dtd; }; struct intel_sdvo_connector { struct intel_connector base; /* Mark the type of connector */ uint16_t output_flag; int force_audio; /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; struct drm_property *top; struct drm_property *bottom; struct drm_property *hpos; struct drm_property *vpos; struct drm_property *contrast; struct drm_property *saturation; struct drm_property *hue; struct drm_property *sharpness; struct drm_property *flicker_filter; struct drm_property *flicker_filter_adaptive; struct drm_property *flicker_filter_2d; struct drm_property *tv_chroma_filter; struct drm_property *tv_luma_filter; struct drm_property *dot_crawl; /* add the property for the SDVO-TV/LVDS */ struct drm_property *brightness; /* Add variable to record current setting for the above property */ u32 left_margin, right_margin, top_margin, bottom_margin; /* this is to get the range of margin.*/ u32 max_hscan, max_vscan; u32 max_hpos, cur_hpos; u32 max_vpos, cur_vpos; u32 cur_brightness, 
max_brightness; u32 cur_contrast, max_contrast; u32 cur_saturation, max_saturation; u32 cur_hue, max_hue; u32 cur_sharpness, max_sharpness; u32 cur_flicker_filter, max_flicker_filter; u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive; u32 cur_flicker_filter_2d, max_flicker_filter_2d; u32 cur_tv_chroma_filter, max_tv_chroma_filter; u32 cur_tv_luma_filter, max_tv_luma_filter; u32 cur_dot_crawl, max_dot_crawl; }; static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) { return container_of(encoder, struct intel_sdvo, base.base); } static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) { return container_of(intel_attached_encoder(connector), struct intel_sdvo, base); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) { return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base); } static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type); static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector); /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). */ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 bval = val, cval = val; int i; if (intel_sdvo->sdvo_reg == PCH_SDVOB) { I915_WRITE(intel_sdvo->sdvo_reg, val); I915_READ(intel_sdvo->sdvo_reg); return; } if (intel_sdvo->sdvo_reg == SDVOB) { cval = I915_READ(SDVOC); } else { bval = I915_READ(SDVOB); } /* * Write the registers twice for luck. Sometimes, * writing them only once doesn't appear to 'stick'. 
* The BIOS does this too. Yay, magic */ for (i = 0; i < 2; i++) { I915_WRITE(SDVOB, bval); I915_READ(SDVOB); I915_WRITE(SDVOC, cval); I915_READ(SDVOC); } } static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { struct i2c_msg msgs[] = { { .addr = intel_sdvo->slave_addr, .flags = 0, .len = 1, .buf = &addr, }, { .addr = intel_sdvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = ch, } }; int ret; if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) return true; DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { u8 cmd; const char *name; } sdvo_cmd_names[] = { SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), /* Add the op code for SDVO enhancements */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), /* HDMI op code */ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), }; #define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) #define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { int i; DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd); for (i = 0; i < args_len; i++) DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); for (; i < 8; i++) DRM_LOG_KMS(" "); for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { if (cmd == sdvo_cmd_names[i].cmd) { DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name); break; } } if (i == ARRAY_SIZE(sdvo_cmd_names)) DRM_LOG_KMS("(%02X)", cmd); DRM_LOG_KMS("\n"); } static const char *cmd_status_names[] = { "Power on", "Success", "Not supported", "Invalid arg", "Pending", "Target not specified", "Scaling not supported" }; static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { u8 buf[args_len*2 + 2], status; struct i2c_msg msgs[args_len + 3]; int i, ret; intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); for (i = 0; i < args_len; i++) { msgs[i].addr = intel_sdvo->slave_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2 *i; buf[2*i + 0] = SDVO_I2C_ARG_0 - i; buf[2*i + 1] = ((u8*)args)[i]; } msgs[i].addr = intel_sdvo->slave_addr; msgs[i].flags = 0; msgs[i].len = 2; msgs[i].buf = buf + 2*i; buf[2*i + 0] = SDVO_I2C_OPCODE; buf[2*i + 1] = cmd; /* the following two are to read the response */ status = SDVO_I2C_CMD_STATUS; msgs[i+1].addr = intel_sdvo->slave_addr; msgs[i+1].flags = 0; msgs[i+1].len = 1; msgs[i+1].buf = &status; msgs[i+2].addr = intel_sdvo->slave_addr; msgs[i+2].flags = I2C_M_RD; msgs[i+2].len = 1; msgs[i+2].buf = &status; ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); if (ret < 0) { 
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); return false; } if (ret != i+3) { /* failure in I2C transfer */ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); return false; } return true; } static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { u8 retry = 5; u8 status; int i; DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); /* * The documentation states that all commands will be * processed within 15µs, and that we need only poll * the status byte a maximum of 3 times in order for the * command to be complete. * * Check 5 times in case the hardware failed to read the docs. */ if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; while (status == SDVO_CMD_STATUS_PENDING && retry--) { udelay(15); if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, &status)) goto log_fail; } if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) DRM_LOG_KMS("(%s)", cmd_status_names[status]); else DRM_LOG_KMS("(??? %d)", status); if (status != SDVO_CMD_STATUS_SUCCESS) goto log_fail; /* Read the command response */ for (i = 0; i < response_len; i++) { if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_RETURN_0 + i, &((u8 *)response)[i])) goto log_fail; DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); } DRM_LOG_KMS("\n"); return true; log_fail: DRM_LOG_KMS("... 
failed\n");
	return false;
}

/*
 * Map a mode's pixel clock (in kHz) to the SDVO input clock-rate
 * multiplier: >= 100 MHz -> 1x, >= 50 MHz -> 2x, otherwise 4x.
 */
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
{
	if (mode->clock >= 100000)
		return 1;
	else if (mode->clock >= 50000)
		return 2;
	else
		return 4;
}

/*
 * Route the SDVO device's DDC pins to the bus selected by @ddc_bus.
 * This must be the immediately preceding write before the i2c xfer.
 */
static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
					      u8 ddc_bus)
{
	return intel_sdvo_write_cmd(intel_sdvo,
				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
				    &ddc_bus, 1);
}

/* Issue @cmd with @data, then consume the (empty) status response. */
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
{
	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
		return false;

	return intel_sdvo_read_response(intel_sdvo, cmd, data, len) is not read back here;
	the reply bytes land in @value. */
static bool intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
{
	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
		return false;

	return intel_sdvo_read_response(intel_sdvo, value, len);
}

/* Target input 0 (an all-zero args struct selects the first input). */
static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
{
	struct intel_sdvo_set_target_input_args targets = {0};
	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_TARGET_INPUT,
				    &targets, sizeof(targets));
}

/**
 * Return whether each input is trained.
 *
 * This function is making an assumption about the layout of the response,
 * which should be checked against the docs.
 */
static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
{
	struct intel_sdvo_get_trained_inputs_response response;

	/* Response is documented as a single byte with per-input bits. */
	BUILD_BUG_ON(sizeof(response) != 1);
	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
				  &response, sizeof(response)))
		return false;

	*input_1 = response.input0_trained;
	*input_2 = response.input1_trained;
	return true;
}

/* Enable exactly the outputs in the @outputs bitmask. */
static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
					  u16 outputs)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ACTIVE_OUTPUTS,
				    &outputs, sizeof(outputs));
}

/*
 * Translate a DRM DPMS mode into the SDVO encoder power state and
 * program it.  Unknown modes fall through to the default (full on).
 */
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
					       int mode)
{
	u8 state = SDVO_ENCODER_STATE_ON;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		state = SDVO_ENCODER_STATE_ON;
		break;
	case DRM_MODE_DPMS_STANDBY:
		state = SDVO_ENCODER_STATE_STANDBY;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		state = SDVO_ENCODER_STATE_SUSPEND;
		break;
	case DRM_MODE_DPMS_OFF:
		state = SDVO_ENCODER_STATE_OFF;
		break;
	}

	return intel_sdvo_set_value(intel_sdvo,
		SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
}

/*
 * Query the supported input pixel clock range.  On success *clock_min
 * and *clock_max are reported in kHz (device replies in 10 kHz units).
 */
static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
						   int *clock_min,
						   int *clock_max)
{
	struct intel_sdvo_pixel_clock_range clocks;

	BUILD_BUG_ON(sizeof(clocks) != 4);
	if (!intel_sdvo_get_value(intel_sdvo,
				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
				  &clocks, sizeof(clocks)))
		return false;

	/* Convert the values from units of 10 kHz to kHz.
 */
	*clock_min = clocks.min * 10;
	*clock_max = clocks.max * 10;
	return true;
}

/* Select which output(s) subsequent timing/format commands apply to. */
static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo, u16 outputs)
{
	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_TARGET_OUTPUT,
				    &outputs, sizeof(outputs));
}

/*
 * Write a two-part DTD.  Relies on PART2 opcodes being PART1 + 1 for
 * both the input- and output-timing command pairs.
 */
static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd, struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
		intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
}

static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
					 struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_timing(intel_sdvo,
				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}

static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
					 struct intel_sdvo_dtd *dtd)
{
	return intel_sdvo_set_timing(intel_sdvo,
				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}

/*
 * Ask the device to compute a preferred input timing for the given
 * clock (10 kHz units, per the caller's mode->clock / 10) and size.
 * For LVDS, request scaling when the size differs from the fixed panel
 * mode.
 */
static bool
intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
					 uint16_t clock,
					 uint16_t width,
					 uint16_t height)
{
	struct intel_sdvo_preferred_input_timing_args args;

	memset(&args, 0, sizeof(args));
	args.clock = clock;
	args.width = width;
	args.height = height;
	args.interlace = 0;

	if (intel_sdvo->is_lvds &&
	   (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
	    intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
		args.scaled = 1;

	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
				    &args, sizeof(args));
}

/* Read back the preferred input timing computed by the command above. */
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
						  struct intel_sdvo_dtd *dtd)
{
	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
	return intel_sdvo_get_value(intel_sdvo,
				    SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
				    &dtd->part1, sizeof(dtd->part1)) &&
		intel_sdvo_get_value(intel_sdvo,
				     SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
				     &dtd->part2, sizeof(dtd->part2));
}

static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
{
	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
}

/*
 * Pack a DRM display mode into the SDVO wire-format DTD (detailed
 * timing descriptor).  Field widths: 12-bit active/blank (low byte +
 * high nibble), 10-bit h sync offset/width and 6-bit v sync
 * offset/width (low bits in part2, overflow bits gathered in
 * sync_off_width_high / v_sync_off_high).
 */
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
					 const struct drm_display_mode *mode)
{
	uint16_t width, height;
	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
	uint16_t h_sync_offset, v_sync_offset;
	int mode_clock;

	width = mode->crtc_hdisplay;
	height = mode->crtc_vdisplay;

	/* do some mode translations */
	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;

	/*
	 * The DTD clock is in 10 kHz units of the un-multiplied dot
	 * clock (the device re-applies the multiplier itself).
	 */
	mode_clock = mode->clock;
	mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
	mode_clock /= 10;
	dtd->part1.clock = mode_clock;

	dtd->part1.h_active = width & 0xff;
	dtd->part1.h_blank = h_blank_len & 0xff;
	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
		((h_blank_len >> 8) & 0xf);
	dtd->part1.v_active = height & 0xff;
	dtd->part1.v_blank = v_blank_len & 0xff;
	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
		((v_blank_len >> 8) & 0xf);

	dtd->part2.h_sync_off = h_sync_offset & 0xff;
	dtd->part2.h_sync_width = h_sync_len & 0xff;
	dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
		(v_sync_len & 0xf);
	/* Overflow bits: h_sync_offset[9:8], h_sync_len[9:8],
	 * v_sync_offset[5:4], v_sync_len[5:4]. */
	dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
		((h_sync_len & 0x300) >> 4) |
		((v_sync_offset & 0x30) >> 2) |
		((v_sync_len & 0x30) >> 4);

	/* 0x18 = digital separate sync; OR in positive-polarity flags. */
	dtd->part2.dtd_flags = 0x18;
	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
		dtd->part2.dtd_flags |= 0x2;
	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		dtd->part2.dtd_flags |= 0x4;

	dtd->part2.sdvo_flags = 0;
	/* v_sync_offset bits [7:6]; the decode path adds them back raw. */
	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
	dtd->part2.reserved = 0;
}

/* Inverse of the above: unpack a DTD back into a DRM display mode. */
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
					 const struct intel_sdvo_dtd *dtd)
{
	mode->hdisplay = dtd->part1.h_active;
	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f)
<< 8; mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; mode->htotal = mode->hdisplay + dtd->part1.h_blank; mode->htotal += (dtd->part1.h_high & 0xf) << 8; mode->vdisplay = dtd->part1.v_active; mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; mode->vsync_start = mode->vdisplay; mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; mode->vsync_end = mode->vsync_start + (dtd->part2.v_sync_off_width & 0xf); mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; mode->vtotal = mode->vdisplay + dtd->part1.v_blank; mode->vtotal += (dtd->part1.v_high & 0xf) << 8; mode->clock = dtd->part1.clock * 10; mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); if (dtd->part2.dtd_flags & 0x2) mode->flags |= DRM_MODE_FLAG_PHSYNC; if (dtd->part2.dtd_flags & 0x4) mode->flags |= DRM_MODE_FLAG_PVSYNC; } static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_encode encode; BUILD_BUG_ON(sizeof(encode) != 2); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPP_ENCODE, &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, uint8_t mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1); } static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, uint8_t mode) { return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { int i, j; uint8_t set_buf_index[2]; uint8_t av_split; uint8_t buf_size; uint8_t buf[48]; uint8_t *pos; intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1); for (i = 0; i <= av_split; i++) { 
		set_buf_index[0] = i;
		set_buf_index[1] = 0;

		intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
				     set_buf_index, 2);

		intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
		intel_sdvo_read_response(encoder, &buf_size, 1);

		pos = buf;
		for (j = 0; j <= buf_size; j += 8) {
			intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
					     NULL, 0);
			intel_sdvo_read_response(encoder, pos, 8);
			pos += 8;
		}
	}
}
#endif

/*
 * Upload the AVI InfoFrame into HDMI buffer index 1 and arrange for it
 * to be transmitted every vsync.
 *
 * NOTE(review): the frame is streamed as raw 8-byte chunks through a
 * uint64_t* cast of &avi_if; this assumes sizeof(struct dip_infoframe)
 * is a multiple of 8 and relies on type-punning the struct -- confirm
 * against the struct layout (a memcpy-based copy would be safer).
 */
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
	struct dip_infoframe avi_if = {
		.type = DIP_TYPE_AVI,
		.ver = DIP_VERSION_AVI,
		.len = DIP_LEN_AVI,
	};
	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
	/* Buffer index 1: index 0 is reserved (see dump helper above). */
	uint8_t set_buf_index[2] = { 1, 0 };
	uint64_t *data = (uint64_t *)&avi_if;
	unsigned i;

	intel_dip_infoframe_csum(&avi_if);

	if (!intel_sdvo_set_value(intel_sdvo,
				  SDVO_CMD_SET_HBUF_INDEX,
				  set_buf_index, 2))
		return false;

	for (i = 0; i < sizeof(avi_if); i += 8) {
		if (!intel_sdvo_set_value(intel_sdvo,
					  SDVO_CMD_SET_HBUF_DATA,
					  data, 8))
			return false;
		data++;
	}

	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_HBUF_TXRATE,
				    &tx_rate, 1);
}

/*
 * Program the currently selected TV format.  The format is sent as a
 * one-hot bitmask (bit = tv_format_index) copied into the low bytes of
 * the 6-byte command payload.
 */
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
{
	struct intel_sdvo_tv_format format;
	uint32_t format_map;

	format_map = 1 << intel_sdvo->tv_format_index;
	memset(&format, 0, sizeof(format));
	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));

	BUILD_BUG_ON(sizeof(format) != 6);
	return intel_sdvo_set_value(intel_sdvo,
				    SDVO_CMD_SET_TV_FORMAT,
				    &format, sizeof(format));
}

/* Target the attached output and program its DTD from @mode. */
static bool
intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
					struct drm_display_mode *mode)
{
	struct intel_sdvo_dtd output_dtd;

	if (!intel_sdvo_set_target_output(intel_sdvo,
					  intel_sdvo->attached_output))
		return false;

	intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
		return false;

	return true;
}

static bool
intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
				      struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
/* Reset the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) return false; if (!intel_sdvo_create_preferred_input_timing(intel_sdvo, mode->clock / 10, mode->hdisplay, mode->vdisplay)) return false; if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, &intel_sdvo->input_dtd)) return false; intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); drm_mode_set_crtcinfo(adjusted_mode, 0); return true; } static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); int multiplier; /* We need to construct preferred input timings based on our * output timings. To do that, we have to set the output * timings, even though this isn't really the right place in * the sequence to do it. Oh well. */ if (intel_sdvo->is_tv) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode); } else if (intel_sdvo->is_lvds) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, intel_sdvo->sdvo_lvds_fixed_mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode); } /* Make the CRTC code factor in the SDVO pixel multiplier. The * SDVO device will factor out the multiplier during mode_set. 
*/ multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); return true; } static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd, output_dtd; int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int rate; if (!mode) return; /* First, set the input mapping for the first input to our controlled * output. This is only correct if we're a single-input device, in * which case the first input is the output from the appropriate SDVO * channel on the motherboard. In a two-input device, the first input * will be SDVOB and the second SDVOC. */ in_out.in0 = intel_sdvo->attached_output; in_out.in1 = 0; intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); /* Set the output timings to the screen */ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; /* lvds has a special fixed output timing. */ if (intel_sdvo->is_lvds) intel_sdvo_get_dtd_from_mode(&output_dtd, intel_sdvo->sdvo_lvds_fixed_mode); else intel_sdvo_get_dtd_from_mode(&output_dtd, mode); (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); /* Set the input timing to the screen. Assume always input 0. 
*/ if (!intel_sdvo_set_target_input(intel_sdvo)) return; if (intel_sdvo->has_hdmi_monitor) { intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); intel_sdvo_set_avi_infoframe(intel_sdvo); } else intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); if (intel_sdvo->is_tv && !intel_sdvo_set_tv_format(intel_sdvo)) return; /* We have tried to get input timing in mode_fixup, and filled into * adjusted_mode. */ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); switch (pixel_multiplier) { default: case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; } if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate)) return; /* Set the SDVO control regs. */ if (INTEL_INFO(dev)->gen >= 4) { /* The real mode polarity is set by the SDVO commands, using * struct intel_sdvo_dtd. */ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; if (intel_sdvo->is_hdmi) sdvox |= intel_sdvo->color_range; if (INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_BORDER_ENABLE; } else { sdvox = I915_READ(intel_sdvo->sdvo_reg); switch (intel_sdvo->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; break; case SDVOC: sdvox &= SDVOC_PRESERVE_MASK; break; } sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; } if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; if (intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; if (INTEL_INFO(dev)->gen >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } static void intel_sdvo_dpms(struct 
drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 temp; if (mode != DRM_MODE_DPMS_ON) { intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); if (mode == DRM_MODE_DPMS_OFF) { temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) != 0) { intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); } } } else { bool input1, input2; int i; u8 status; temp = I915_READ(intel_sdvo->sdvo_reg); if ((temp & SDVO_ENABLE) == 0) intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); for (i = 0; i < 2; i++) intel_wait_for_vblank(dev, intel_crtc->pipe); status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* Warn if the device reported failure to sync. * A lot of SDVO devices fail to notify of sync, but it's * a given it the status is a success, we succeeded. 
*/ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { DRM_DEBUG_KMS("First %s output reported failure to " "sync\n", SDVO_NAME(intel_sdvo)); } if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); } return; } static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (intel_sdvo->pixel_clock_min > mode->clock) return MODE_CLOCK_LOW; if (intel_sdvo->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; if (intel_sdvo->is_lvds) { if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) return MODE_PANEL; if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) return MODE_PANEL; } return MODE_OK; } static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { BUILD_BUG_ON(sizeof(*caps) != 8); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps))) return false; DRM_DEBUG_KMS("SDVO capabilities:\n" " vendor_id: %d\n" " device_id: %d\n" " device_rev_id: %d\n" " sdvo_version_major: %d\n" " sdvo_version_minor: %d\n" " sdvo_inputs_mask: %d\n" " smooth_scaling: %d\n" " sharp_scaling: %d\n" " up_scaling: %d\n" " down_scaling: %d\n" " stall_support: %d\n" " output_flags: %d\n", caps->vendor_id, caps->device_id, caps->device_rev_id, caps->sdvo_version_major, caps->sdvo_version_minor, caps->sdvo_inputs_mask, caps->smooth_scaling, caps->sharp_scaling, caps->up_scaling, caps->down_scaling, caps->stall_support, caps->output_flags); return true; } /* No use! 
*/ #if 0 struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) { struct drm_connector *connector = NULL; struct intel_sdvo *iout = NULL; struct intel_sdvo *sdvo; /* find the sdvo connector */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { iout = to_intel_sdvo(connector); if (iout->type != INTEL_OUTPUT_SDVO) continue; sdvo = iout->dev_priv; if (sdvo->sdvo_reg == SDVOB && sdvoB) return connector; if (sdvo->sdvo_reg == SDVOC && !sdvoB) return connector; } return NULL; } int intel_sdvo_supports_hotplug(struct drm_connector *connector) { u8 response[2]; u8 status; struct intel_sdvo *intel_sdvo; DRM_DEBUG_KMS("\n"); if (!connector) return 0; intel_sdvo = to_intel_sdvo(connector); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, &response, 2) && response[0]; } void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) { u8 response[2]; u8 status; struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); intel_sdvo_read_response(intel_sdvo, &response, 2); if (on) { intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); status = intel_sdvo_read_response(intel_sdvo, &response, 2); intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } else { response[0] = 0; response[1] = 0; intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); } intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); intel_sdvo_read_response(intel_sdvo, &response, 2); } #endif static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) { /* Is there more than one type of output? 
*/ int caps = intel_sdvo->caps.output_flags & 0xf; return caps & -caps; } static struct edid * intel_sdvo_get_edid(struct drm_connector *connector) { struct intel_sdvo *sdvo = intel_attached_sdvo(connector); return drm_get_edid(connector, &sdvo->ddc); } /* Mac mini hack -- use the same DDC as the analog connector */ static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); } enum drm_connector_status intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); enum drm_connector_status status; struct edid *edid; edid = intel_sdvo_get_edid(connector); if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { u8 ddc, saved_ddc = intel_sdvo->ddc_bus; /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { intel_sdvo->ddc_bus = ddc; edid = intel_sdvo_get_edid(connector); if (edid) break; } /* * If we found the EDID on the other bus, * assume that is the correct DDC bus. */ if (edid == NULL) intel_sdvo->ddc_bus = saved_ddc; } /* * When there is no edid and no monitor is connected with VGA * port, try to use the CRT ddc to read the EDID for DVI-connector. 
*/ if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); status = connector_status_unknown; if (edid != NULL) { /* DDC bus is shared, match EDID to connector type */ if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; if (intel_sdvo->is_hdmi) { intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); } } else status = connector_status_disconnected; connector->display_info.raw_edid = NULL; kfree(edid); } if (status == connector_status_connected) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->force_audio) intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0; } return status; } static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { uint16_t response; struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) return connector_status_unknown; /* add 30ms delay when the output type might be TV */ if (intel_sdvo->caps.output_flags & (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) mdelay(30); if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return connector_status_unknown; DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", response & 0xff, response >> 8, intel_sdvo_connector->output_flag); if (response == 0) return connector_status_disconnected; intel_sdvo->attached_output = response; intel_sdvo->has_hdmi_monitor = false; intel_sdvo->has_hdmi_audio = false; if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; else if (IS_TMDS(intel_sdvo_connector)) ret = intel_sdvo_hdmi_sink_detect(connector); else { struct edid *edid; /* if we have an edid check it matches the connection */ edid = 
intel_sdvo_get_edid(connector); if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { if (edid->input & DRM_EDID_INPUT_DIGITAL) ret = connector_status_disconnected; else ret = connector_status_connected; connector->display_info.raw_edid = NULL; kfree(edid); } else ret = connector_status_connected; } /* May update encoder flag for like clock for SDVO TV, etc.*/ if (ret == connector_status_connected) { intel_sdvo->is_tv = false; intel_sdvo->is_lvds = false; intel_sdvo->base.needs_tv_clock = false; if (response & SDVO_TV_MASK) { intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; } if (response & SDVO_LVDS_MASK) intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL; } return ret; } static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { struct edid *edid; /* set the bus switch and get the modes */ edid = intel_sdvo_get_edid(connector); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC * link between analog and digital outputs. So, if the regular SDVO * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. */ if (edid == NULL) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector); if (connector_is_digital == monitor_is_digital) { drm_mode_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); } connector->display_info.raw_edid = NULL; kfree(edid); } } /* * Set of SDVO TV modes. * Note! This is in reply order (see loop in get_tv_modes). * XXX: all 60Hz refresh? 
*/ static const struct drm_display_mode sdvo_tv_modes[] = { { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384, 416, 0, 200, 201, 232, 233, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384, 416, 0, 240, 241, 272, 273, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464, 496, 0, 300, 301, 332, 333, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704, 736, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704, 736, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704, 736, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768, 800, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768, 800, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784, 816, 0, 350, 351, 382, 383, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784, 816, 0, 400, 401, 432, 433, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784, 816, 0, 480, 481, 512, 513, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784, 816, 0, 540, 541, 572, 573, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784, 816, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 
769, 832, 864, 0, 576, 577, 608, 609, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864, 896, 0, 600, 601, 632, 633, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896, 928, 0, 624, 625, 656, 657, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984, 1016, 0, 766, 767, 798, 799, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088, 1120, 0, 768, 769, 800, 801, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344, 1376, 0, 1024, 1025, 1056, 1057, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map = 0; int i; /* Read the list of supported input resolutions for the selected TV * format. 
*/ format_map = 1 << intel_sdvo->tv_format_index; memcpy(&tv_res, &format_map, min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; BUILD_BUG_ON(sizeof(tv_res) != 3); if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res))) return; if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) return; for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) if (reply & (1 << i)) { struct drm_display_mode *nmode; nmode = drm_mode_duplicate(connector->dev, &sdvo_tv_modes[i]); if (nmode) drm_mode_probed_add(connector, nmode); } } static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_display_mode *newmode; /* * Attempt to get the mode list from DDC. * Assume that the preferred modes are * arranged in priority order. 
*/ intel_ddc_get_modes(connector, intel_sdvo->i2c); if (list_empty(&connector->probed_modes) == false) goto end; /* Fetch modes from VBT */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); if (newmode != NULL) { /* Guarantee the mode is preferred */ newmode->type = (DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER); drm_mode_probed_add(connector, newmode); } } end: list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = drm_mode_duplicate(connector->dev, newmode); drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); intel_sdvo->is_lvds = true; break; } } } static int intel_sdvo_get_modes(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (IS_TV(intel_sdvo_connector)) intel_sdvo_get_tv_modes(connector); else if (IS_LVDS(intel_sdvo_connector)) intel_sdvo_get_lvds_modes(connector); else intel_sdvo_get_ddc_modes(connector); return !list_empty(&connector->probed_modes); } static void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_device *dev = connector->dev; if (intel_sdvo_connector->left) drm_property_destroy(dev, intel_sdvo_connector->left); if (intel_sdvo_connector->right) drm_property_destroy(dev, intel_sdvo_connector->right); if (intel_sdvo_connector->top) drm_property_destroy(dev, intel_sdvo_connector->top); if (intel_sdvo_connector->bottom) drm_property_destroy(dev, intel_sdvo_connector->bottom); if (intel_sdvo_connector->hpos) drm_property_destroy(dev, intel_sdvo_connector->hpos); if (intel_sdvo_connector->vpos) drm_property_destroy(dev, intel_sdvo_connector->vpos); if (intel_sdvo_connector->saturation) drm_property_destroy(dev, intel_sdvo_connector->saturation); if (intel_sdvo_connector->contrast) 
drm_property_destroy(dev, intel_sdvo_connector->contrast); if (intel_sdvo_connector->hue) drm_property_destroy(dev, intel_sdvo_connector->hue); if (intel_sdvo_connector->sharpness) drm_property_destroy(dev, intel_sdvo_connector->sharpness); if (intel_sdvo_connector->flicker_filter) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter); if (intel_sdvo_connector->flicker_filter_2d) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d); if (intel_sdvo_connector->flicker_filter_adaptive) drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive); if (intel_sdvo_connector->tv_luma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter); if (intel_sdvo_connector->tv_chroma_filter) drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter); if (intel_sdvo_connector->dot_crawl) drm_property_destroy(dev, intel_sdvo_connector->dot_crawl); if (intel_sdvo_connector->brightness) drm_property_destroy(dev, intel_sdvo_connector->brightness); } static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->tv_format) drm_property_destroy(connector->dev, intel_sdvo_connector->tv_format); intel_sdvo_destroy_enhance_property(connector); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); } static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct edid *edid; bool has_audio = false; if (!intel_sdvo->is_hdmi) return false; edid = intel_sdvo_get_edid(connector); if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); return has_audio; } static int intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector 
*intel_sdvo_connector = to_intel_sdvo_connector(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; uint16_t temp_value; uint8_t cmd; int ret; ret = drm_connector_property_set_value(connector, property, val); if (ret) return ret; if (property == dev_priv->force_audio_property) { int i = val; bool has_audio; if (i == intel_sdvo_connector->force_audio) return 0; intel_sdvo_connector->force_audio = i; if (i == 0) has_audio = intel_sdvo_detect_hdmi_audio(connector); else has_audio = i > 0; if (has_audio == intel_sdvo->has_hdmi_audio) return 0; intel_sdvo->has_hdmi_audio = has_audio; goto done; } if (property == dev_priv->broadcast_rgb_property) { if (val == !!intel_sdvo->color_range) return 0; intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; goto done; } #define CHECK_PROPERTY(name, NAME) \ if (intel_sdvo_connector->name == property) { \ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \ cmd = SDVO_CMD_SET_##NAME; \ intel_sdvo_connector->cur_##name = temp_value; \ goto set_value; \ } if (property == intel_sdvo_connector->tv_format) { if (val >= TV_FORMAT_NUM) return -EINVAL; if (intel_sdvo->tv_format_index == intel_sdvo_connector->tv_format_supported[val]) return 0; intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val]; goto done; } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { temp_value = val; if (intel_sdvo_connector->left == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->right, val); if (intel_sdvo_connector->left_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->right == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->left, 
val); if (intel_sdvo_connector->right_margin == temp_value) return 0; intel_sdvo_connector->left_margin = temp_value; intel_sdvo_connector->right_margin = temp_value; temp_value = intel_sdvo_connector->max_hscan - intel_sdvo_connector->left_margin; cmd = SDVO_CMD_SET_OVERSCAN_H; goto set_value; } else if (intel_sdvo_connector->top == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->bottom, val); if (intel_sdvo_connector->top_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } else if (intel_sdvo_connector->bottom == property) { drm_connector_property_set_value(connector, intel_sdvo_connector->top, val); if (intel_sdvo_connector->bottom_margin == temp_value) return 0; intel_sdvo_connector->top_margin = temp_value; intel_sdvo_connector->bottom_margin = temp_value; temp_value = intel_sdvo_connector->max_vscan - intel_sdvo_connector->top_margin; cmd = SDVO_CMD_SET_OVERSCAN_V; goto set_value; } CHECK_PROPERTY(hpos, HPOS) CHECK_PROPERTY(vpos, VPOS) CHECK_PROPERTY(saturation, SATURATION) CHECK_PROPERTY(contrast, CONTRAST) CHECK_PROPERTY(hue, HUE) CHECK_PROPERTY(brightness, BRIGHTNESS) CHECK_PROPERTY(sharpness, SHARPNESS) CHECK_PROPERTY(flicker_filter, FLICKER_FILTER) CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D) CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE) CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER) CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER) CHECK_PROPERTY(dot_crawl, DOT_CRAWL) } return -EINVAL; /* unknown property */ set_value: if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2)) return -EIO; done: if (intel_sdvo->base.base.crtc) { struct drm_crtc *crtc = intel_sdvo->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } return 0; #undef CHECK_PROPERTY } 
/* Encoder helper vtable: modeset entry points for the SDVO encoder. */
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
	.dpms = intel_sdvo_dpms,
	.mode_fixup = intel_sdvo_mode_fixup,
	.prepare = intel_encoder_prepare,
	.mode_set = intel_sdvo_mode_set,
	.commit = intel_encoder_commit,
};

/* Connector vtable shared by every SDVO connector type (DVI/TV/VGA/LVDS). */
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = intel_sdvo_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_sdvo_set_property,
	.destroy = intel_sdvo_destroy,
};

static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
	.get_modes = intel_sdvo_get_modes,
	.mode_valid = intel_sdvo_mode_valid,
	.best_encoder = intel_best_encoder,
};

/*
 * Encoder teardown: frees the cached LVDS fixed mode (if any), removes the
 * DDC proxy i2c adapter registered in intel_sdvo_init_ddc_proxy(), then
 * hands the rest to the generic encoder destructor.
 */
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);

	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
		drm_mode_destroy(encoder->dev,
				 intel_sdvo->sdvo_lvds_fixed_mode);

	i2c_del_adapter(&intel_sdvo->ddc);
	intel_encoder_destroy(encoder);
}

static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
	.destroy = intel_sdvo_enc_destroy,
};

/*
 * Fallback DDC bus selection when the VBT did not describe the mapping:
 * derive the bus number from this output's position in the output priority
 * order (RGB highest, then TMDS, then LVDS).
 */
static void
intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
{
	uint16_t mask = 0;
	unsigned int num_bits;

	/* Make a mask of outputs less than or equal to our own priority in the
	 * list.
	 */
	switch (sdvo->controlled_output) {
	case SDVO_OUTPUT_LVDS1:
		mask |= SDVO_OUTPUT_LVDS1;
		/* fall through -- cases deliberately cascade to OR in every
		 * higher-priority output bit below this one */
	case SDVO_OUTPUT_LVDS0:
		mask |= SDVO_OUTPUT_LVDS0;
		/* fall through */
	case SDVO_OUTPUT_TMDS1:
		mask |= SDVO_OUTPUT_TMDS1;
		/* fall through */
	case SDVO_OUTPUT_TMDS0:
		mask |= SDVO_OUTPUT_TMDS0;
		/* fall through */
	case SDVO_OUTPUT_RGB1:
		mask |= SDVO_OUTPUT_RGB1;
		/* fall through */
	case SDVO_OUTPUT_RGB0:
		mask |= SDVO_OUTPUT_RGB0;
		break;
	}

	/* Count bits to find what number we are in the priority list. */
	mask &= sdvo->caps.output_flags;
	num_bits = hweight16(mask);
	/* If more than 3 outputs, default to DDC bus 3 for now. */
	if (num_bits > 3)
		num_bits = 3;

	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
	sdvo->ddc_bus = 1 << num_bits;
}

/**
 * Choose the appropriate DDC bus for control bus switch command for this
 * SDVO output based on the controlled output.
 *
 * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
 * outputs, then LVDS outputs.
 */
static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
			  struct intel_sdvo *sdvo, u32 reg)
{
	struct sdvo_device_mapping *mapping;

	/* SDVOB uses mapping slot 0, SDVOC slot 1 */
	if (IS_SDVOB(reg))
		mapping = &(dev_priv->sdvo_mappings[0]);
	else
		mapping = &(dev_priv->sdvo_mappings[1]);

	if (mapping->initialized)
		/* high nibble of the BIOS ddc_pin encodes the bus number */
		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
	else
		intel_sdvo_guess_ddc_bus(sdvo);
}

/*
 * Pick the GMBUS i2c adapter (pin and speed) used to talk to the SDVO chip.
 * Uses the BIOS-provided mapping when initialized, otherwise falls back to
 * port DPB at 1 MHz; out-of-range pins also fall back to DPB.
 */
static void
intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
			  struct intel_sdvo *sdvo, u32 reg)
{
	struct sdvo_device_mapping *mapping;
	u8 pin, speed;

	if (IS_SDVOB(reg))
		mapping = &dev_priv->sdvo_mappings[0];
	else
		mapping = &dev_priv->sdvo_mappings[1];

	pin = GMBUS_PORT_DPB;
	speed = GMBUS_RATE_1MHZ >> 8;
	if (mapping->initialized) {
		pin = mapping->i2c_pin;
		speed = mapping->i2c_speed;
	}

	if (pin < GMBUS_NUM_PORTS) {
		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
		intel_gmbus_set_speed(sdvo->i2c, speed);
		/* force bit-banging; presumably GMBUS hw mode is not reliable
		 * for this device -- NOTE(review): rationale not visible here */
		intel_gmbus_force_bit(sdvo->i2c, true);
	} else
		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
}

/* True if the encoder advertises HDMI encode support; the per-DVI-output
 * 'device' argument is currently unused. */
static bool
intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
{
	return intel_sdvo_check_supp_encode(intel_sdvo);
}

/*
 * Determine the 7-bit-shifted i2c slave address of the SDVO device:
 * prefer the BIOS-described address, else the one the *other* SDVO port is
 * not using, else the historical default (0x70 for SDVOB, 0x72 for SDVOC).
 */
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sdvo_device_mapping *my_mapping, *other_mapping;

	if (IS_SDVOB(sdvo_reg)) {
		my_mapping = &dev_priv->sdvo_mappings[0];
		other_mapping = &dev_priv->sdvo_mappings[1];
	} else {
		my_mapping = &dev_priv->sdvo_mappings[1];
		other_mapping = &dev_priv->sdvo_mappings[0];
	}

	/* If the BIOS described our SDVO device, take advantage of it. */
	if (my_mapping->slave_addr)
		return my_mapping->slave_addr;

	/* If the BIOS only described a different SDVO device, use the
	 * address that it isn't using.
	 */
	if (other_mapping->slave_addr) {
		if (other_mapping->slave_addr == 0x70)
			return 0x72;
		else
			return 0x70;
	}

	/* No SDVO device info is found for another DVO port,
	 * so use mapping assumption we had before BIOS parsing.
	 */
	if (IS_SDVOB(sdvo_reg))
		return 0x70;
	else
		return 0x72;
}

/*
 * Common connector bring-up: register the DRM connector with the shared
 * SDVO vtables, set display defaults, attach it to the encoder and expose
 * it in sysfs. The caller has already set connector_type/output_flag.
 */
static void
intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
			  struct intel_sdvo *encoder)
{
	drm_connector_init(encoder->base.base.dev,
			   &connector->base.base,
			   &intel_sdvo_connector_funcs,
			   connector->base.base.connector_type);

	drm_connector_helper_add(&connector->base.base,
				 &intel_sdvo_connector_helper_funcs);

	connector->base.base.interlace_allowed = 0;
	connector->base.base.doublescan_allowed = 0;
	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;

	intel_connector_attach_encoder(&connector->base, &encoder->base);
	drm_sysfs_connector_add(&connector->base.base);
}

/* Attach the HDMI-specific DRM properties (force-audio always; broadcast
 * RGB only on gen4+ mobile). */
static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
{
	struct drm_device *dev = connector->base.base.dev;

	intel_attach_force_audio_property(&connector->base.base);
	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
		intel_attach_broadcast_rgb_property(&connector->base.base);
}

/*
 * Create the DVI/HDMI connector for TMDS output 'device' (0 or 1).
 * Upgrades the connector type to HDMI-A when the chip supports HDMI encode.
 * Returns false only on allocation failure.
 */
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	if (device == 0) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
	} else if (device == 1) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
	}

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	/* digital outputs are hot-pluggable in both directions */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
	connector->connector_type = DRM_MODE_CONNECTOR_DVID;

	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
		intel_sdvo->is_hdmi = true;
	}
	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
				       (1 << INTEL_ANALOG_CLONE_BIT));

	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
	if (intel_sdvo->is_hdmi)
		intel_sdvo_add_hdmi_properties(intel_sdvo_connector);

	return true;
}

/*
 * Create the TV connector for output 'type' (SVID0 or CVBS0), including the
 * TV-format property and the image-enhancement properties. On property
 * creation failure the half-built connector is destroyed.
 */
static bool
intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;

	intel_sdvo->controlled_output |= type;
	intel_sdvo_connector->output_flag = type;

	intel_sdvo->is_tv = true;
	intel_sdvo->base.needs_tv_clock = true;
	intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;

	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);

	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
		goto err;

	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
		goto err;

	return true;

err:
	intel_sdvo_destroy(connector);
	return false;
}

/*
 * Create the analog VGA connector for RGB output 'device' (0 or 1).
 * Returns false only on allocation failure.
 */
static bool
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	/* analog detection is connect-poll only (no disconnect polling) */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
	connector->connector_type = DRM_MODE_CONNECTOR_VGA;

	if (device == 0) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
	} else if (device == 1) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
	}

	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
				       (1 << INTEL_ANALOG_CLONE_BIT));

	intel_sdvo_connector_init(intel_sdvo_connector,
				  intel_sdvo);
	return true;
}

/*
 * Create the LVDS connector for LVDS output 'device' (0 or 1), including
 * the enhancement (brightness) property; on property failure the connector
 * is destroyed.
 */
static bool
intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
{
	struct drm_encoder *encoder = &intel_sdvo->base.base;
	struct drm_connector *connector;
	struct intel_connector *intel_connector;
	struct intel_sdvo_connector *intel_sdvo_connector;

	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
	if (!intel_sdvo_connector)
		return false;

	intel_connector = &intel_sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;

	if (device == 0) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
	} else if (device == 1) {
		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
	}

	intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));

	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
		goto err;

	return true;

err:
	intel_sdvo_destroy(connector);
	return false;
}

/*
 * Instantiate one connector per output advertised in 'flags'.
 * The secondary (XXX1) output of a pair is only set up when the primary
 * (XXX0) is also present. Fails if no known output bit is set.
 */
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
{
	intel_sdvo->is_tv = false;
	intel_sdvo->base.needs_tv_clock = false;
	intel_sdvo->is_lvds = false;

	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/

	if (flags & SDVO_OUTPUT_TMDS0)
		if (!intel_sdvo_dvi_init(intel_sdvo, 0))
			return false;

	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
		if (!intel_sdvo_dvi_init(intel_sdvo, 1))
			return false;

	/* TV has no XXX1 function block */
	if (flags & SDVO_OUTPUT_SVID0)
		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
			return false;

	if (flags & SDVO_OUTPUT_CVBS0)
		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
			return false;

	if (flags & SDVO_OUTPUT_RGB0)
		if (!intel_sdvo_analog_init(intel_sdvo, 0))
			return false;

	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
		if (!intel_sdvo_analog_init(intel_sdvo, 1))
			return false;

	if (flags & SDVO_OUTPUT_LVDS0)
		if (!intel_sdvo_lvds_init(intel_sdvo, 0))
			return false;

	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
		if (!intel_sdvo_lvds_init(intel_sdvo, 1))
			return false;

	if ((flags & SDVO_OUTPUT_MASK) == 0) {
		unsigned char bytes[2];

		intel_sdvo->controlled_output = 0;
		memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
			      SDVO_NAME(intel_sdvo),
			      bytes[0], bytes[1]);
		return false;
	}
	/* SDVO outputs can be driven by either pipe */
	intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);

	return true;
}

/*
 * Query the device for its supported TV formats and expose them as the
 * "mode" enum property on the connector. Fails if the device reports no
 * supported formats or the property cannot be created.
 */
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
					  struct intel_sdvo_connector *intel_sdvo_connector,
					  int type)
{
	struct drm_device *dev = intel_sdvo->base.base.dev;
	struct intel_sdvo_tv_format format;
	uint32_t format_map, i;

	if (!intel_sdvo_set_target_output(intel_sdvo, type))
		return false;

	/* the hardware reply is a 6-byte format bitmap */
	BUILD_BUG_ON(sizeof(format) != 6);
	if (!intel_sdvo_get_value(intel_sdvo,
				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
				  &format, sizeof(format)))
		return false;

	/* only the low 32 bits of the format bitmap are examined */
	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));

	if (format_map == 0)
		return false;

	intel_sdvo_connector->format_supported_num = 0;
	for (i = 0 ; i < TV_FORMAT_NUM; i++)
		if (format_map & (1 << i))
			intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;

	intel_sdvo_connector->tv_format =
			drm_property_create(dev, DRM_MODE_PROP_ENUM,
					    "mode", intel_sdvo_connector->format_supported_num);
	if (!intel_sdvo_connector->tv_format)
		return false;

	for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
		drm_property_add_enum(
				intel_sdvo_connector->tv_format, i,
				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);

	/* default to the first supported format */
	intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
	drm_connector_attach_property(&intel_sdvo_connector->base.base,
				      intel_sdvo_connector->tv_format, 0);
	return true;
}

/*
 * Helper macro for the enhance-property builders below: if the device
 * advertises enhancement 'name', read its max and current values, create a
 * [0, max] range property and attach it to the connector. Expects
 * 'enhancements', 'intel_sdvo', 'intel_sdvo_connector', 'dev', 'connector',
 * 'data_value' and 'response' in the caller's scope; may 'return false'.
 */
#define ENHANCEMENT(name, NAME) do { \
	if (enhancements.name) { \
		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
		    !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
			return false; \
		intel_sdvo_connector->max_##name = data_value[0]; \
		intel_sdvo_connector->cur_##name = response; \
		intel_sdvo_connector->name = \
			drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
		if (!intel_sdvo_connector->name) return false; \
		intel_sdvo_connector->name->values[0] = 0; \
		intel_sdvo_connector->name->values[1] = data_value[0]; \
		drm_connector_attach_property(connector, \
					      intel_sdvo_connector->name, \
					      intel_sdvo_connector->cur_##name); \
		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
			      data_value[0], data_value[1], response); \
	} \
} while(0)

/*
 * Create every TV image-enhancement property the device supports:
 * overscan margins (paired left/right and top/bottom properties backed by a
 * single hardware overscan value), the ENHANCEMENT() scalar controls, and
 * the boolean dot-crawl control.
 */
static bool
intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
				      struct intel_sdvo_connector *intel_sdvo_connector,
				      struct intel_sdvo_enhancements_reply enhancements)
{
	struct drm_device *dev = intel_sdvo->base.base.dev;
	struct drm_connector *connector = &intel_sdvo_connector->base.base;
	uint16_t response, data_value[2];

	/* when horizontal overscan is supported, Add the left/right property */
	if (enhancements.overscan_h) {
		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_MAX_OVERSCAN_H,
					  &data_value, 4))
			return false;

		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_OVERSCAN_H,
					  &response, 2))
			return false;

		/* margin = max - overscan; left and right share one value */
		intel_sdvo_connector->max_hscan = data_value[0];
		intel_sdvo_connector->left_margin = data_value[0] - response;
		intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
		intel_sdvo_connector->left =
			drm_property_create(dev, DRM_MODE_PROP_RANGE,
					    "left_margin", 2);
		if (!intel_sdvo_connector->left)
			return false;

		intel_sdvo_connector->left->values[0] = 0;
		intel_sdvo_connector->left->values[1] = data_value[0];
		drm_connector_attach_property(connector,
					      intel_sdvo_connector->left,
					      intel_sdvo_connector->left_margin);

		intel_sdvo_connector->right =
			drm_property_create(dev, DRM_MODE_PROP_RANGE,
					    "right_margin", 2);
		if (!intel_sdvo_connector->right)
			return false;

		intel_sdvo_connector->right->values[0] = 0;
		intel_sdvo_connector->right->values[1] = data_value[0];
		drm_connector_attach_property(connector,
					      intel_sdvo_connector->right,
					      intel_sdvo_connector->right_margin);
		DRM_DEBUG_KMS("h_overscan: max %d, "
			      "default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}

	if (enhancements.overscan_v) {
		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_MAX_OVERSCAN_V,
					  &data_value, 4))
			return false;

		if (!intel_sdvo_get_value(intel_sdvo,
					  SDVO_CMD_GET_OVERSCAN_V,
					  &response, 2))
			return false;

		/* same max - overscan scheme as the horizontal case */
		intel_sdvo_connector->max_vscan = data_value[0];
		intel_sdvo_connector->top_margin = data_value[0] - response;
		intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
		intel_sdvo_connector->top =
			drm_property_create(dev, DRM_MODE_PROP_RANGE,
					    "top_margin", 2);
		if (!intel_sdvo_connector->top)
			return false;

		intel_sdvo_connector->top->values[0] = 0;
		intel_sdvo_connector->top->values[1] = data_value[0];
		drm_connector_attach_property(connector,
					      intel_sdvo_connector->top,
					      intel_sdvo_connector->top_margin);

		intel_sdvo_connector->bottom =
			drm_property_create(dev, DRM_MODE_PROP_RANGE,
					    "bottom_margin", 2);
		if (!intel_sdvo_connector->bottom)
			return false;

		intel_sdvo_connector->bottom->values[0] = 0;
		intel_sdvo_connector->bottom->values[1] = data_value[0];
		drm_connector_attach_property(connector,
					      intel_sdvo_connector->bottom,
					      intel_sdvo_connector->bottom_margin);
		DRM_DEBUG_KMS("v_overscan: max %d, "
			      "default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}

	ENHANCEMENT(hpos, HPOS);
	ENHANCEMENT(vpos, VPOS);
	ENHANCEMENT(saturation, SATURATION);
	ENHANCEMENT(contrast, CONTRAST);
	ENHANCEMENT(hue, HUE);
	ENHANCEMENT(sharpness, SHARPNESS);
	ENHANCEMENT(brightness, BRIGHTNESS);
	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);

	/* dot_crawl is boolean so it does not fit the ENHANCEMENT pattern */
	if (enhancements.dot_crawl) {
		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
			return false;

		intel_sdvo_connector->max_dot_crawl = 1;
		intel_sdvo_connector->cur_dot_crawl = response & 0x1;
		intel_sdvo_connector->dot_crawl =
			drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
		if (!intel_sdvo_connector->dot_crawl)
			return false;

		intel_sdvo_connector->dot_crawl->values[0] = 0;
		intel_sdvo_connector->dot_crawl->values[1] = 1;
		drm_connector_attach_property(connector,
					      intel_sdvo_connector->dot_crawl,
					      intel_sdvo_connector->cur_dot_crawl);
		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
	}

	return true;
}

/* LVDS panels only expose the brightness enhancement property. */
static bool
intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
					struct intel_sdvo_connector *intel_sdvo_connector,
					struct intel_sdvo_enhancements_reply enhancements)
{
	struct drm_device *dev = intel_sdvo->base.base.dev;
	struct drm_connector *connector = &intel_sdvo_connector->base.base;
	uint16_t response, data_value[2];

	ENHANCEMENT(brightness, BRIGHTNESS);

	return true;
}
#undef ENHANCEMENT

/*
 * Query the supported-enhancements bitmap and dispatch to the TV or LVDS
 * property builder. A device with no enhancements, or a read failure that
 * leaves the bitmap zero, is treated as success.
 */
static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
					       struct intel_sdvo_connector *intel_sdvo_connector)
{
	union {
		struct intel_sdvo_enhancements_reply reply;
		uint16_t response;
	} enhancements;

	BUILD_BUG_ON(sizeof(enhancements) != 2);

	enhancements.response = 0;
	intel_sdvo_get_value(intel_sdvo,
			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
			     &enhancements, sizeof(enhancements));
	if (enhancements.response == 0) {
		DRM_DEBUG_KMS("No enhancement is supported\n");
		return true;
	}

	if (IS_TV(intel_sdvo_connector))
		return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
	else if(IS_LVDS(intel_sdvo_connector))
		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
	else
		return true;
}

/*
 * DDC proxy i2c transfer: switch the SDVO control bus to the selected DDC
 * bus before forwarding the transaction to the underlying GMBUS adapter.
 */
static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
				     struct i2c_msg *msgs,
				     int num)
{
	struct intel_sdvo *sdvo = adapter->algo_data;

	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
		return -EIO;

	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
}

/* Report the underlying adapter's functionality through the proxy. */
static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
{
	struct intel_sdvo *sdvo = adapter->algo_data;
	return sdvo->i2c->algo->functionality(sdvo->i2c);
}

static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
	.master_xfer = intel_sdvo_ddc_proxy_xfer,
	.functionality = intel_sdvo_ddc_proxy_func
};

/*
 * Register the proxy i2c adapter used for EDID reads behind the SDVO
 * control-bus switch. Returns true on successful registration; torn down in
 * intel_sdvo_enc_destroy() via i2c_del_adapter().
 */
static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
			  struct drm_device *dev)
{
	sdvo->ddc.owner = THIS_MODULE;
	sdvo->ddc.class = I2C_CLASS_DDC;
	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
	sdvo->ddc.dev.parent = &dev->pdev->dev;
	sdvo->ddc.algo_data = sdvo;
	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;

	return i2c_add_adapter(&sdvo->ddc) == 0;
}

/*
 * Top-level SDVO probe/init for the encoder at 'sdvo_reg':
 * allocate state, select the i2c bus and slave address, register the DDC
 * proxy, verify the device responds over i2c, enable hotplug, read the
 * capabilities, create the output connectors, pick the DDC bus and cache
 * the input pixel-clock range. Returns false (with full cleanup) on any
 * failure.
 */
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	struct intel_sdvo *intel_sdvo;
	int i;

	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
	if (!intel_sdvo)
		return false;

	intel_sdvo->sdvo_reg = sdvo_reg;
	/* stored as a 7-bit address, hence the >> 1 */
	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
		kfree(intel_sdvo);
		return false;
	}

	/* encoder type will be decided later */
	intel_encoder = &intel_sdvo->base;
	intel_encoder->type = INTEL_OUTPUT_SDVO;
	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);

	/* Read the regs to test if we can talk to the device */
	for (i = 0; i < 0x40; i++) {
		u8 byte;

		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
			goto err;
		}
	}

	if (IS_SDVOB(sdvo_reg))
		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
	else
		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;

	drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);

	/* In default case sdvo lvds is false */
	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
		goto err;

	if (intel_sdvo_output_setup(intel_sdvo,
				    intel_sdvo->caps.output_flags) != true) {
		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
		goto err;
	}

	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);

	/* Set the input timing to the screen. Assume always input 0. */
	if (!intel_sdvo_set_target_input(intel_sdvo))
		goto err;

	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
						    &intel_sdvo->pixel_clock_min,
						    &intel_sdvo->pixel_clock_max))
		goto err;

	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
			"clock range %dMHz - %dMHz, "
			"input 1: %c, input 2: %c, "
			"output 1: %c, output 2: %c\n",
			SDVO_NAME(intel_sdvo),
			intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
			intel_sdvo->caps.device_rev_id,
			intel_sdvo->pixel_clock_min / 1000,
			intel_sdvo->pixel_clock_max / 1000,
			(intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
			(intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
			/* check currently supported outputs */
			intel_sdvo->caps.output_flags &
			(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
			intel_sdvo->caps.output_flags &
			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
	return true;

err:
	drm_encoder_cleanup(&intel_encoder->base);
	i2c_del_adapter(&intel_sdvo->ddc);
	kfree(intel_sdvo);

	return false;
}
gpl-2.0
wendal/rk2918_uzone_f0_top
drivers/platform/x86/sony-laptop.c
500
77089
/* * ACPI Sony Notebook Control Driver (SNC and SPIC) * * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net> * Copyright (C) 2007-2009 Mattia Dongili <malattia@linux.it> * * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c * which are copyrighted by their respective authors. * * The SNY6001 driver part is based on the sonypi driver which includes * material from: * * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> * * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> * * Copyright (C) 2001-2002 Alcôve <www.alcove.com> * * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> * * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp> * * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> * * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> * * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/backlight.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/dmi.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/kfifo.h> #include <linux/workqueue.h> #include <linux/acpi.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> #include <asm/uaccess.h> #include <linux/sonypi.h> #include <linux/sony-laptop.h> #include <linux/rfkill.h> #ifdef CONFIG_SONYPI_COMPAT #include <linux/poll.h> #include <linux/miscdevice.h> #endif #define DRV_PFX "sony-laptop: " #define dprintk(msg...) do { \ if (debug) printk(KERN_WARNING DRV_PFX msg); \ } while (0) #define SONY_LAPTOP_DRIVER_VERSION "0.6" #define SONY_NC_CLASS "sony-nc" #define SONY_NC_HID "SNY5001" #define SONY_NC_DRIVER_NAME "Sony Notebook Control Driver" #define SONY_PIC_CLASS "sony-pic" #define SONY_PIC_HID "SNY6001" #define SONY_PIC_DRIVER_NAME "Sony Programmable IO Control Driver" MODULE_AUTHOR("Stelian Pop, Mattia Dongili"); MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)"); MODULE_LICENSE("GPL"); MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help " "the development of this driver"); static int no_spic; /* = 0 */ module_param(no_spic, int, 0444); MODULE_PARM_DESC(no_spic, "set this if you don't want to enable the SPIC device"); static int compat; /* = 0 */ module_param(compat, int, 0444); MODULE_PARM_DESC(compat, "set this if you want to enable backward compatibility mode"); static unsigned long mask = 0xffffffff; module_param(mask, ulong, 0644); MODULE_PARM_DESC(mask, "set this to the mask of event you want to enable (see doc)"); static int camera; /* = 0 */ module_param(camera, int, 0444); MODULE_PARM_DESC(camera, "set this to 1 
to enable Motion Eye camera controls " "(only use it if you have a C1VE or C1VN model)"); #ifdef CONFIG_SONYPI_COMPAT static int minor = -1; module_param(minor, int, 0); MODULE_PARM_DESC(minor, "minor number of the misc device for the SPIC compatibility code, " "default is -1 (automatic)"); #endif enum sony_nc_rfkill { SONY_WIFI, SONY_BLUETOOTH, SONY_WWAN, SONY_WIMAX, N_SONY_RFKILL, }; static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL]; static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900}; static void sony_nc_rfkill_update(void); /*********** Input Devices ***********/ #define SONY_LAPTOP_BUF_SIZE 128 struct sony_laptop_input_s { atomic_t users; struct input_dev *jog_dev; struct input_dev *key_dev; struct kfifo *fifo; spinlock_t fifo_lock; struct workqueue_struct *wq; }; static struct sony_laptop_input_s sony_laptop_input = { .users = ATOMIC_INIT(0), }; struct sony_laptop_keypress { struct input_dev *dev; int key; }; /* Correspondance table between sonypi events * and input layer indexes in the keymap */ static int sony_laptop_input_index[] = { -1, /* 0 no event */ -1, /* 1 SONYPI_EVENT_JOGDIAL_DOWN */ -1, /* 2 SONYPI_EVENT_JOGDIAL_UP */ -1, /* 3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */ -1, /* 4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */ -1, /* 5 SONYPI_EVENT_JOGDIAL_PRESSED */ -1, /* 6 SONYPI_EVENT_JOGDIAL_RELEASED */ 0, /* 7 SONYPI_EVENT_CAPTURE_PRESSED */ 1, /* 8 SONYPI_EVENT_CAPTURE_RELEASED */ 2, /* 9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */ 3, /* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */ 4, /* 11 SONYPI_EVENT_FNKEY_ESC */ 5, /* 12 SONYPI_EVENT_FNKEY_F1 */ 6, /* 13 SONYPI_EVENT_FNKEY_F2 */ 7, /* 14 SONYPI_EVENT_FNKEY_F3 */ 8, /* 15 SONYPI_EVENT_FNKEY_F4 */ 9, /* 16 SONYPI_EVENT_FNKEY_F5 */ 10, /* 17 SONYPI_EVENT_FNKEY_F6 */ 11, /* 18 SONYPI_EVENT_FNKEY_F7 */ 12, /* 19 SONYPI_EVENT_FNKEY_F8 */ 13, /* 20 SONYPI_EVENT_FNKEY_F9 */ 14, /* 21 SONYPI_EVENT_FNKEY_F10 */ 15, /* 22 SONYPI_EVENT_FNKEY_F11 */ 16, /* 23 SONYPI_EVENT_FNKEY_F12 */ 17, /* 24 
SONYPI_EVENT_FNKEY_1 */ 18, /* 25 SONYPI_EVENT_FNKEY_2 */ 19, /* 26 SONYPI_EVENT_FNKEY_D */ 20, /* 27 SONYPI_EVENT_FNKEY_E */ 21, /* 28 SONYPI_EVENT_FNKEY_F */ 22, /* 29 SONYPI_EVENT_FNKEY_S */ 23, /* 30 SONYPI_EVENT_FNKEY_B */ 24, /* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */ 25, /* 32 SONYPI_EVENT_PKEY_P1 */ 26, /* 33 SONYPI_EVENT_PKEY_P2 */ 27, /* 34 SONYPI_EVENT_PKEY_P3 */ 28, /* 35 SONYPI_EVENT_BACK_PRESSED */ -1, /* 36 SONYPI_EVENT_LID_CLOSED */ -1, /* 37 SONYPI_EVENT_LID_OPENED */ 29, /* 38 SONYPI_EVENT_BLUETOOTH_ON */ 30, /* 39 SONYPI_EVENT_BLUETOOTH_OFF */ 31, /* 40 SONYPI_EVENT_HELP_PRESSED */ 32, /* 41 SONYPI_EVENT_FNKEY_ONLY */ 33, /* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */ 34, /* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */ 35, /* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */ 36, /* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */ 37, /* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */ 38, /* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */ 39, /* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */ 40, /* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */ 41, /* 50 SONYPI_EVENT_ZOOM_PRESSED */ 42, /* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */ 43, /* 52 SONYPI_EVENT_MEYE_FACE */ 44, /* 53 SONYPI_EVENT_MEYE_OPPOSITE */ 45, /* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */ 46, /* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */ -1, /* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */ -1, /* 57 SONYPI_EVENT_BATTERY_INSERT */ -1, /* 58 SONYPI_EVENT_BATTERY_REMOVE */ -1, /* 59 SONYPI_EVENT_FNKEY_RELEASED */ 47, /* 60 SONYPI_EVENT_WIRELESS_ON */ 48, /* 61 SONYPI_EVENT_WIRELESS_OFF */ 49, /* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */ 50, /* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */ 51, /* 64 SONYPI_EVENT_CD_EJECT_PRESSED */ 52, /* 65 SONYPI_EVENT_MODEKEY_PRESSED */ 53, /* 66 SONYPI_EVENT_PKEY_P4 */ 54, /* 67 SONYPI_EVENT_PKEY_P5 */ 55, /* 68 SONYPI_EVENT_SETTINGKEY_PRESSED */ 56, /* 69 SONYPI_EVENT_VOLUME_INC_PRESSED */ 57, /* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */ -1, /* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */ }; static int sony_laptop_input_keycode_map[] = { KEY_CAMERA, /* 
0 SONYPI_EVENT_CAPTURE_PRESSED */ KEY_RESERVED, /* 1 SONYPI_EVENT_CAPTURE_RELEASED */ KEY_RESERVED, /* 2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */ KEY_RESERVED, /* 3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */ KEY_FN_ESC, /* 4 SONYPI_EVENT_FNKEY_ESC */ KEY_FN_F1, /* 5 SONYPI_EVENT_FNKEY_F1 */ KEY_FN_F2, /* 6 SONYPI_EVENT_FNKEY_F2 */ KEY_FN_F3, /* 7 SONYPI_EVENT_FNKEY_F3 */ KEY_FN_F4, /* 8 SONYPI_EVENT_FNKEY_F4 */ KEY_FN_F5, /* 9 SONYPI_EVENT_FNKEY_F5 */ KEY_FN_F6, /* 10 SONYPI_EVENT_FNKEY_F6 */ KEY_FN_F7, /* 11 SONYPI_EVENT_FNKEY_F7 */ KEY_FN_F8, /* 12 SONYPI_EVENT_FNKEY_F8 */ KEY_FN_F9, /* 13 SONYPI_EVENT_FNKEY_F9 */ KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */ KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */ KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */ KEY_FN_F1, /* 17 SONYPI_EVENT_FNKEY_1 */ KEY_FN_F2, /* 18 SONYPI_EVENT_FNKEY_2 */ KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */ KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */ KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */ KEY_FN_S, /* 22 SONYPI_EVENT_FNKEY_S */ KEY_FN_B, /* 23 SONYPI_EVENT_FNKEY_B */ KEY_BLUETOOTH, /* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */ KEY_PROG1, /* 25 SONYPI_EVENT_PKEY_P1 */ KEY_PROG2, /* 26 SONYPI_EVENT_PKEY_P2 */ KEY_PROG3, /* 27 SONYPI_EVENT_PKEY_P3 */ KEY_BACK, /* 28 SONYPI_EVENT_BACK_PRESSED */ KEY_BLUETOOTH, /* 29 SONYPI_EVENT_BLUETOOTH_ON */ KEY_BLUETOOTH, /* 30 SONYPI_EVENT_BLUETOOTH_OFF */ KEY_HELP, /* 31 SONYPI_EVENT_HELP_PRESSED */ KEY_FN, /* 32 SONYPI_EVENT_FNKEY_ONLY */ KEY_RESERVED, /* 33 SONYPI_EVENT_JOGDIAL_FAST_DOWN */ KEY_RESERVED, /* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */ KEY_RESERVED, /* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */ KEY_RESERVED, /* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */ KEY_RESERVED, /* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */ KEY_RESERVED, /* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */ KEY_RESERVED, /* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */ KEY_RESERVED, /* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */ KEY_ZOOM, /* 41 SONYPI_EVENT_ZOOM_PRESSED */ BTN_THUMB, /* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED 
*/ KEY_RESERVED, /* 43 SONYPI_EVENT_MEYE_FACE */ KEY_RESERVED, /* 44 SONYPI_EVENT_MEYE_OPPOSITE */ KEY_RESERVED, /* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */ KEY_RESERVED, /* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */ KEY_WLAN, /* 47 SONYPI_EVENT_WIRELESS_ON */ KEY_WLAN, /* 48 SONYPI_EVENT_WIRELESS_OFF */ KEY_ZOOMIN, /* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */ KEY_ZOOMOUT, /* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */ KEY_EJECTCD, /* 51 SONYPI_EVENT_CD_EJECT_PRESSED */ KEY_F13, /* 52 SONYPI_EVENT_MODEKEY_PRESSED */ KEY_PROG4, /* 53 SONYPI_EVENT_PKEY_P4 */ KEY_F14, /* 54 SONYPI_EVENT_PKEY_P5 */ KEY_F15, /* 55 SONYPI_EVENT_SETTINGKEY_PRESSED */ KEY_VOLUMEUP, /* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */ KEY_VOLUMEDOWN, /* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */ }; /* release buttons after a short delay if pressed */ static void do_sony_laptop_release_key(struct work_struct *work) { struct sony_laptop_keypress kp; while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) { msleep(10); input_report_key(kp.dev, kp.key, 0); input_sync(kp.dev); } } static DECLARE_WORK(sony_laptop_release_key_work, do_sony_laptop_release_key); /* forward event to the input subsystem */ static void sony_laptop_report_input_event(u8 event) { struct input_dev *jog_dev = sony_laptop_input.jog_dev; struct input_dev *key_dev = sony_laptop_input.key_dev; struct sony_laptop_keypress kp = { NULL }; if (event == SONYPI_EVENT_FNKEY_RELEASED || event == SONYPI_EVENT_ANYBUTTON_RELEASED) { /* Nothing, not all VAIOs generate this event */ return; } /* report events */ switch (event) { /* jog_dev events */ case SONYPI_EVENT_JOGDIAL_UP: case SONYPI_EVENT_JOGDIAL_UP_PRESSED: input_report_rel(jog_dev, REL_WHEEL, 1); input_sync(jog_dev); return; case SONYPI_EVENT_JOGDIAL_DOWN: case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED: input_report_rel(jog_dev, REL_WHEEL, -1); input_sync(jog_dev); return; /* key_dev events */ case SONYPI_EVENT_JOGDIAL_PRESSED: kp.key = BTN_MIDDLE; kp.dev = jog_dev; break; default: if (event >= 
ARRAY_SIZE(sony_laptop_input_index)) { dprintk("sony_laptop_report_input_event, event not known: %d\n", event); break; } if (sony_laptop_input_index[event] != -1) { kp.key = sony_laptop_input_keycode_map[sony_laptop_input_index[event]]; if (kp.key != KEY_UNKNOWN) kp.dev = key_dev; } break; } if (kp.dev) { input_report_key(kp.dev, kp.key, 1); /* we emit the scancode so we can always remap the key */ input_event(kp.dev, EV_MSC, MSC_SCAN, event); input_sync(kp.dev); kfifo_put(sony_laptop_input.fifo, (unsigned char *)&kp, sizeof(kp)); if (!work_pending(&sony_laptop_release_key_work)) queue_work(sony_laptop_input.wq, &sony_laptop_release_key_work); } else dprintk("unknown input event %.2x\n", event); } static int sony_laptop_setup_input(struct acpi_device *acpi_device) { struct input_dev *jog_dev; struct input_dev *key_dev; int i; int error; /* don't run again if already initialized */ if (atomic_add_return(1, &sony_laptop_input.users) > 1) return 0; /* kfifo */ spin_lock_init(&sony_laptop_input.fifo_lock); sony_laptop_input.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL, &sony_laptop_input.fifo_lock); if (IS_ERR(sony_laptop_input.fifo)) { printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); error = PTR_ERR(sony_laptop_input.fifo); goto err_dec_users; } /* init workqueue */ sony_laptop_input.wq = create_singlethread_workqueue("sony-laptop"); if (!sony_laptop_input.wq) { printk(KERN_ERR DRV_PFX "Unable to create workqueue.\n"); error = -ENXIO; goto err_free_kfifo; } /* input keys */ key_dev = input_allocate_device(); if (!key_dev) { error = -ENOMEM; goto err_destroy_wq; } key_dev->name = "Sony Vaio Keys"; key_dev->id.bustype = BUS_ISA; key_dev->id.vendor = PCI_VENDOR_ID_SONY; key_dev->dev.parent = &acpi_device->dev; /* Initialize the Input Drivers: special keys */ set_bit(EV_KEY, key_dev->evbit); set_bit(EV_MSC, key_dev->evbit); set_bit(MSC_SCAN, key_dev->mscbit); key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]); key_dev->keycodemax = 
ARRAY_SIZE(sony_laptop_input_keycode_map); key_dev->keycode = &sony_laptop_input_keycode_map; for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) { if (sony_laptop_input_keycode_map[i] != KEY_RESERVED) { set_bit(sony_laptop_input_keycode_map[i], key_dev->keybit); } } error = input_register_device(key_dev); if (error) goto err_free_keydev; sony_laptop_input.key_dev = key_dev; /* jogdial */ jog_dev = input_allocate_device(); if (!jog_dev) { error = -ENOMEM; goto err_unregister_keydev; } jog_dev->name = "Sony Vaio Jogdial"; jog_dev->id.bustype = BUS_ISA; jog_dev->id.vendor = PCI_VENDOR_ID_SONY; key_dev->dev.parent = &acpi_device->dev; jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE); jog_dev->relbit[0] = BIT_MASK(REL_WHEEL); error = input_register_device(jog_dev); if (error) goto err_free_jogdev; sony_laptop_input.jog_dev = jog_dev; return 0; err_free_jogdev: input_free_device(jog_dev); err_unregister_keydev: input_unregister_device(key_dev); /* to avoid kref underflow below at input_free_device */ key_dev = NULL; err_free_keydev: input_free_device(key_dev); err_destroy_wq: destroy_workqueue(sony_laptop_input.wq); err_free_kfifo: kfifo_free(sony_laptop_input.fifo); err_dec_users: atomic_dec(&sony_laptop_input.users); return error; } static void sony_laptop_remove_input(void) { /* cleanup only after the last user has gone */ if (!atomic_dec_and_test(&sony_laptop_input.users)) return; /* flush workqueue first */ flush_workqueue(sony_laptop_input.wq); /* destroy input devs */ input_unregister_device(sony_laptop_input.key_dev); sony_laptop_input.key_dev = NULL; if (sony_laptop_input.jog_dev) { input_unregister_device(sony_laptop_input.jog_dev); sony_laptop_input.jog_dev = NULL; } destroy_workqueue(sony_laptop_input.wq); kfifo_free(sony_laptop_input.fifo); } /*********** Platform Device ***********/ static atomic_t sony_pf_users = ATOMIC_INIT(0); static struct platform_driver sony_pf_driver = { 
.driver = { .name = "sony-laptop", .owner = THIS_MODULE, } }; static struct platform_device *sony_pf_device; static int sony_pf_add(void) { int ret = 0; /* don't run again if already initialized */ if (atomic_add_return(1, &sony_pf_users) > 1) return 0; ret = platform_driver_register(&sony_pf_driver); if (ret) goto out; sony_pf_device = platform_device_alloc("sony-laptop", -1); if (!sony_pf_device) { ret = -ENOMEM; goto out_platform_registered; } ret = platform_device_add(sony_pf_device); if (ret) goto out_platform_alloced; return 0; out_platform_alloced: platform_device_put(sony_pf_device); sony_pf_device = NULL; out_platform_registered: platform_driver_unregister(&sony_pf_driver); out: atomic_dec(&sony_pf_users); return ret; } static void sony_pf_remove(void) { /* deregister only after the last user has gone */ if (!atomic_dec_and_test(&sony_pf_users)) return; platform_device_del(sony_pf_device); platform_device_put(sony_pf_device); platform_driver_unregister(&sony_pf_driver); } /*********** SNC (SNY5001) Device ***********/ /* the device uses 1-based values, while the backlight subsystem uses 0-based values */ #define SONY_MAX_BRIGHTNESS 8 #define SNC_VALIDATE_IN 0 #define SNC_VALIDATE_OUT 1 static ssize_t sony_nc_sysfs_show(struct device *, struct device_attribute *, char *); static ssize_t sony_nc_sysfs_store(struct device *, struct device_attribute *, const char *, size_t); static int boolean_validate(const int, const int); static int brightness_default_validate(const int, const int); struct sony_nc_value { char *name; /* name of the entry */ char **acpiget; /* names of the ACPI get function */ char **acpiset; /* names of the ACPI set function */ int (*validate)(const int, const int); /* input/output validation */ int value; /* current setting */ int valid; /* Has ever been set */ int debug; /* active only in debug mode ? */ struct device_attribute devattr; /* sysfs atribute */ }; #define SNC_HANDLE_NAMES(_name, _values...) 
\ static char *snc_##_name[] = { _values, NULL } #define SNC_HANDLE(_name, _getters, _setters, _validate, _debug) \ { \ .name = __stringify(_name), \ .acpiget = _getters, \ .acpiset = _setters, \ .validate = _validate, \ .debug = _debug, \ .devattr = __ATTR(_name, 0, sony_nc_sysfs_show, sony_nc_sysfs_store), \ } #define SNC_HANDLE_NULL { .name = NULL } SNC_HANDLE_NAMES(fnkey_get, "GHKE"); SNC_HANDLE_NAMES(brightness_def_get, "GPBR"); SNC_HANDLE_NAMES(brightness_def_set, "SPBR"); SNC_HANDLE_NAMES(cdpower_get, "GCDP"); SNC_HANDLE_NAMES(cdpower_set, "SCDP", "CDPW"); SNC_HANDLE_NAMES(audiopower_get, "GAZP"); SNC_HANDLE_NAMES(audiopower_set, "AZPW"); SNC_HANDLE_NAMES(lanpower_get, "GLNP"); SNC_HANDLE_NAMES(lanpower_set, "LNPW"); SNC_HANDLE_NAMES(lidstate_get, "GLID"); SNC_HANDLE_NAMES(indicatorlamp_get, "GILS"); SNC_HANDLE_NAMES(indicatorlamp_set, "SILS"); SNC_HANDLE_NAMES(gainbass_get, "GMGB"); SNC_HANDLE_NAMES(gainbass_set, "CMGB"); SNC_HANDLE_NAMES(PID_get, "GPID"); SNC_HANDLE_NAMES(CTR_get, "GCTR"); SNC_HANDLE_NAMES(CTR_set, "SCTR"); SNC_HANDLE_NAMES(PCR_get, "GPCR"); SNC_HANDLE_NAMES(PCR_set, "SPCR"); SNC_HANDLE_NAMES(CMI_get, "GCMI"); SNC_HANDLE_NAMES(CMI_set, "SCMI"); static struct sony_nc_value sony_nc_values[] = { SNC_HANDLE(brightness_default, snc_brightness_def_get, snc_brightness_def_set, brightness_default_validate, 0), SNC_HANDLE(fnkey, snc_fnkey_get, NULL, NULL, 0), SNC_HANDLE(cdpower, snc_cdpower_get, snc_cdpower_set, boolean_validate, 0), SNC_HANDLE(audiopower, snc_audiopower_get, snc_audiopower_set, boolean_validate, 0), SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set, boolean_validate, 1), SNC_HANDLE(lidstate, snc_lidstate_get, NULL, boolean_validate, 0), SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set, boolean_validate, 0), SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set, boolean_validate, 0), /* unknown methods */ SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1), SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1), 
SNC_HANDLE(PCR, snc_PCR_get, snc_PCR_set, NULL, 1), SNC_HANDLE(CMI, snc_CMI_get, snc_CMI_set, NULL, 1), SNC_HANDLE_NULL }; static acpi_handle sony_nc_acpi_handle; static struct acpi_device *sony_nc_acpi_device = NULL; /* * acpi_evaluate_object wrappers */ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result) { struct acpi_buffer output; union acpi_object out_obj; acpi_status status; output.length = sizeof(out_obj); output.pointer = &out_obj; status = acpi_evaluate_object(handle, name, NULL, &output); if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) { *result = out_obj.integer.value; return 0; } printk(KERN_WARNING DRV_PFX "acpi_callreadfunc failed\n"); return -1; } static int acpi_callsetfunc(acpi_handle handle, char *name, int value, int *result) { struct acpi_object_list params; union acpi_object in_obj; struct acpi_buffer output; union acpi_object out_obj; acpi_status status; params.count = 1; params.pointer = &in_obj; in_obj.type = ACPI_TYPE_INTEGER; in_obj.integer.value = value; output.length = sizeof(out_obj); output.pointer = &out_obj; status = acpi_evaluate_object(handle, name, &params, &output); if (status == AE_OK) { if (result != NULL) { if (out_obj.type != ACPI_TYPE_INTEGER) { printk(KERN_WARNING DRV_PFX "acpi_evaluate_object bad " "return type\n"); return -1; } *result = out_obj.integer.value; } return 0; } printk(KERN_WARNING DRV_PFX "acpi_evaluate_object failed\n"); return -1; } static int sony_find_snc_handle(int handle) { int i; int result; for (i = 0x20; i < 0x30; i++) { acpi_callsetfunc(sony_nc_acpi_handle, "SN00", i, &result); if (result == handle) return i-0x20; } return -1; } static int sony_call_snc_handle(int handle, int argument, int *result) { int offset = sony_find_snc_handle(handle); if (offset < 0) return -1; return acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, result); } /* * sony_nc_values input/output validate functions */ /* brightness_default_validate: * * manipulate input output 
values to keep consistency with the * backlight framework for which brightness values are 0-based. */ static int brightness_default_validate(const int direction, const int value) { switch (direction) { case SNC_VALIDATE_OUT: return value - 1; case SNC_VALIDATE_IN: if (value >= 0 && value < SONY_MAX_BRIGHTNESS) return value + 1; } return -EINVAL; } /* boolean_validate: * * on input validate boolean values 0/1, on output just pass the * received value. */ static int boolean_validate(const int direction, const int value) { if (direction == SNC_VALIDATE_IN) { if (value != 0 && value != 1) return -EINVAL; } return value; } /* * Sysfs show/store common to all sony_nc_values */ static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr, char *buffer) { int value; struct sony_nc_value *item = container_of(attr, struct sony_nc_value, devattr); if (!*item->acpiget) return -EIO; if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0) return -EIO; if (item->validate) value = item->validate(SNC_VALIDATE_OUT, value); return snprintf(buffer, PAGE_SIZE, "%d\n", value); } static ssize_t sony_nc_sysfs_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { int value; struct sony_nc_value *item = container_of(attr, struct sony_nc_value, devattr); if (!item->acpiset) return -EIO; if (count > 31) return -EINVAL; value = simple_strtoul(buffer, NULL, 10); if (item->validate) value = item->validate(SNC_VALIDATE_IN, value); if (value < 0) return value; if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0) return -EIO; item->value = value; item->valid = 1; return count; } /* * Backlight device */ static int sony_backlight_update_status(struct backlight_device *bd) { return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", bd->props.brightness + 1, NULL); } static int sony_backlight_get_brightness(struct backlight_device *bd) { int value; if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) 
return 0; /* brightness levels are 1-based, while backlight ones are 0-based */ return value - 1; } static struct backlight_device *sony_backlight_device; static struct backlight_ops sony_backlight_ops = { .update_status = sony_backlight_update_status, .get_brightness = sony_backlight_get_brightness, }; /* * New SNC-only Vaios event mapping to driver known keys */ struct sony_nc_event { u8 data; u8 event; }; static struct sony_nc_event sony_100_events[] = { { 0x90, SONYPI_EVENT_PKEY_P1 }, { 0x10, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x91, SONYPI_EVENT_PKEY_P2 }, { 0x11, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x81, SONYPI_EVENT_FNKEY_F1 }, { 0x01, SONYPI_EVENT_FNKEY_RELEASED }, { 0x82, SONYPI_EVENT_FNKEY_F2 }, { 0x02, SONYPI_EVENT_FNKEY_RELEASED }, { 0x83, SONYPI_EVENT_FNKEY_F3 }, { 0x03, SONYPI_EVENT_FNKEY_RELEASED }, { 0x84, SONYPI_EVENT_FNKEY_F4 }, { 0x04, SONYPI_EVENT_FNKEY_RELEASED }, { 0x85, SONYPI_EVENT_FNKEY_F5 }, { 0x05, SONYPI_EVENT_FNKEY_RELEASED }, { 0x86, SONYPI_EVENT_FNKEY_F6 }, { 0x06, SONYPI_EVENT_FNKEY_RELEASED }, { 0x87, SONYPI_EVENT_FNKEY_F7 }, { 0x07, SONYPI_EVENT_FNKEY_RELEASED }, { 0x89, SONYPI_EVENT_FNKEY_F9 }, { 0x09, SONYPI_EVENT_FNKEY_RELEASED }, { 0x8A, SONYPI_EVENT_FNKEY_F10 }, { 0x0A, SONYPI_EVENT_FNKEY_RELEASED }, { 0x8C, SONYPI_EVENT_FNKEY_F12 }, { 0x0C, SONYPI_EVENT_FNKEY_RELEASED }, { 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED }, { 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0, 0 }, }; static struct sony_nc_event sony_127_events[] = { { 0x81, SONYPI_EVENT_MODEKEY_PRESSED }, { 0x01, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x82, SONYPI_EVENT_PKEY_P1 }, { 0x02, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x83, SONYPI_EVENT_PKEY_P2 }, { 0x03, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x84, SONYPI_EVENT_PKEY_P3 }, { 0x04, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x85, SONYPI_EVENT_PKEY_P4 }, { 0x05, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x86, SONYPI_EVENT_PKEY_P5 }, { 0x06, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0x87, SONYPI_EVENT_SETTINGKEY_PRESSED }, { 0x07, 
SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0, 0 }, }; /* * ACPI callbacks */ static void sony_nc_notify(struct acpi_device *device, u32 event) { u32 ev = event; if (ev >= 0x90) { /* New-style event */ int result; int key_handle = 0; ev -= 0x90; if (sony_find_snc_handle(0x100) == ev) key_handle = 0x100; if (sony_find_snc_handle(0x127) == ev) key_handle = 0x127; if (key_handle) { struct sony_nc_event *key_event; if (sony_call_snc_handle(key_handle, 0x200, &result)) { dprintk("sony_nc_notify, unable to decode" " event 0x%.2x 0x%.2x\n", key_handle, ev); /* restore the original event */ ev = event; } else { ev = result & 0xFF; if (key_handle == 0x100) key_event = sony_100_events; else key_event = sony_127_events; for (; key_event->data; key_event++) { if (key_event->data == ev) { ev = key_event->event; break; } } if (!key_event->data) printk(KERN_INFO DRV_PFX "Unknown event: 0x%x 0x%x\n", key_handle, ev); else sony_laptop_report_input_event(ev); } } else if (sony_find_snc_handle(0x124) == ev) { sony_nc_rfkill_update(); return; } } else sony_laptop_report_input_event(ev); dprintk("sony_nc_notify, event: 0x%.2x\n", ev); acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); } static acpi_status sony_walk_callback(acpi_handle handle, u32 level, void *context, void **return_value) { struct acpi_device_info *info; if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) { printk(KERN_WARNING DRV_PFX "method: name: %4.4s, args %X\n", (char *)&info->name, info->param_count); kfree(info); } return AE_OK; } /* * ACPI device */ static int sony_nc_function_setup(struct acpi_device *device) { int result; /* Enable all events */ acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result); /* Setup hotkeys */ sony_call_snc_handle(0x0100, 0, &result); sony_call_snc_handle(0x0101, 0, &result); sony_call_snc_handle(0x0102, 0x100, &result); sony_call_snc_handle(0x0127, 0, &result); return 0; } static int sony_nc_resume(struct acpi_device *device) { struct sony_nc_value *item; acpi_handle 
handle; for (item = sony_nc_values; item->name; item++) { int ret; if (!item->valid) continue; ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, item->value, NULL); if (ret < 0) { printk("%s: %d\n", __func__, ret); break; } } if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", &handle))) { if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) dprintk("ECON Method failed\n"); } if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", &handle))) { dprintk("Doing SNC setup\n"); sony_nc_function_setup(device); } /* set the last requested brightness level */ if (sony_backlight_device && sony_backlight_update_status(sony_backlight_device) < 0) printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n"); /* re-read rfkill state */ sony_nc_rfkill_update(); return 0; } static void sony_nc_rfkill_cleanup(void) { int i; for (i = 0; i < N_SONY_RFKILL; i++) { if (sony_rfkill_devices[i]) { rfkill_unregister(sony_rfkill_devices[i]); rfkill_destroy(sony_rfkill_devices[i]); } } } static int sony_nc_rfkill_set(void *data, bool blocked) { int result; int argument = sony_rfkill_address[(long) data] + 0x100; if (!blocked) argument |= 0xff0000; return sony_call_snc_handle(0x124, argument, &result); } static const struct rfkill_ops sony_rfkill_ops = { .set_block = sony_nc_rfkill_set, }; static int sony_nc_setup_rfkill(struct acpi_device *device, enum sony_nc_rfkill nc_type) { int err = 0; struct rfkill *rfk; enum rfkill_type type; const char *name; int result; bool hwblock; switch (nc_type) { case SONY_WIFI: type = RFKILL_TYPE_WLAN; name = "sony-wifi"; break; case SONY_BLUETOOTH: type = RFKILL_TYPE_BLUETOOTH; name = "sony-bluetooth"; break; case SONY_WWAN: type = RFKILL_TYPE_WWAN; name = "sony-wwan"; break; case SONY_WIMAX: type = RFKILL_TYPE_WIMAX; name = "sony-wimax"; break; default: return -EINVAL; } rfk = rfkill_alloc(name, &device->dev, type, &sony_rfkill_ops, (void *)nc_type); if (!rfk) return -ENOMEM; sony_call_snc_handle(0x124, 0x200, 
&result); hwblock = !(result & 0x1); rfkill_set_hw_state(rfk, hwblock); err = rfkill_register(rfk); if (err) { rfkill_destroy(rfk); return err; } sony_rfkill_devices[nc_type] = rfk; return err; } static void sony_nc_rfkill_update() { enum sony_nc_rfkill i; int result; bool hwblock; sony_call_snc_handle(0x124, 0x200, &result); hwblock = !(result & 0x1); for (i = 0; i < N_SONY_RFKILL; i++) { int argument = sony_rfkill_address[i]; if (!sony_rfkill_devices[i]) continue; if (hwblock) { if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) { /* we already know we're blocked */ } continue; } sony_call_snc_handle(0x124, argument, &result); rfkill_set_states(sony_rfkill_devices[i], !(result & 0xf), false); } } static int sony_nc_rfkill_setup(struct acpi_device *device) { int result, ret; if (sony_find_snc_handle(0x124) == -1) return -1; ret = sony_call_snc_handle(0x124, 0xb00, &result); if (ret) { printk(KERN_INFO DRV_PFX "Unable to enumerate rfkill devices: %x\n", ret); return ret; } if (result & 0x1) sony_nc_setup_rfkill(device, SONY_WIFI); if (result & 0x2) sony_nc_setup_rfkill(device, SONY_BLUETOOTH); if (result & 0x1c) sony_nc_setup_rfkill(device, SONY_WWAN); if (result & 0x20) sony_nc_setup_rfkill(device, SONY_WIMAX); return 0; } static int sony_nc_add(struct acpi_device *device) { acpi_status status; int result = 0; acpi_handle handle; struct sony_nc_value *item; printk(KERN_INFO DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); sony_nc_acpi_device = device; strcpy(acpi_device_class(device), "sony/hotkey"); sony_nc_acpi_handle = device->handle; /* read device status */ result = acpi_bus_get_status(device); /* bail IFF the above call was successful and the device is not present */ if (!result && !device->status.present) { dprintk("Device not present\n"); result = -ENODEV; goto outwalk; } if (debug) { status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle, 1, sony_walk_callback, NULL, NULL); if (ACPI_FAILURE(status)) { 
printk(KERN_WARNING DRV_PFX "unable to walk acpi resources\n"); result = -ENODEV; goto outwalk; } } if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", &handle))) { if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) dprintk("ECON Method failed\n"); } if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", &handle))) { dprintk("Doing SNC setup\n"); sony_nc_function_setup(device); sony_nc_rfkill_setup(device); } /* setup input devices and helper fifo */ result = sony_laptop_setup_input(device); if (result) { printk(KERN_ERR DRV_PFX "Unable to create input devices.\n"); goto outwalk; } if (acpi_video_backlight_support()) { printk(KERN_INFO DRV_PFX "brightness ignored, must be " "controlled by ACPI video driver\n"); } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", &handle))) { sony_backlight_device = backlight_device_register("sony", NULL, NULL, &sony_backlight_ops); if (IS_ERR(sony_backlight_device)) { printk(KERN_WARNING DRV_PFX "unable to register backlight device\n"); sony_backlight_device = NULL; } else { sony_backlight_device->props.brightness = sony_backlight_get_brightness (sony_backlight_device); sony_backlight_device->props.max_brightness = SONY_MAX_BRIGHTNESS - 1; } } result = sony_pf_add(); if (result) goto outbacklight; /* create sony_pf sysfs attributes related to the SNC device */ for (item = sony_nc_values; item->name; ++item) { if (!debug && item->debug) continue; /* find the available acpiget as described in the DSDT */ for (; item->acpiget && *item->acpiget; ++item->acpiget) { if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, *item->acpiget, &handle))) { dprintk("Found %s getter: %s\n", item->name, *item->acpiget); item->devattr.attr.mode |= S_IRUGO; break; } } /* find the available acpiset as described in the DSDT */ for (; item->acpiset && *item->acpiset; ++item->acpiset) { if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, *item->acpiset, &handle))) { dprintk("Found %s setter: %s\n", item->name, 
*item->acpiset); item->devattr.attr.mode |= S_IWUSR; break; } } if (item->devattr.attr.mode != 0) { result = device_create_file(&sony_pf_device->dev, &item->devattr); if (result) goto out_sysfs; } } return 0; out_sysfs: for (item = sony_nc_values; item->name; ++item) { device_remove_file(&sony_pf_device->dev, &item->devattr); } sony_pf_remove(); outbacklight: if (sony_backlight_device) backlight_device_unregister(sony_backlight_device); sony_laptop_remove_input(); outwalk: sony_nc_rfkill_cleanup(); return result; } static int sony_nc_remove(struct acpi_device *device, int type) { struct sony_nc_value *item; if (sony_backlight_device) backlight_device_unregister(sony_backlight_device); sony_nc_acpi_device = NULL; for (item = sony_nc_values; item->name; ++item) { device_remove_file(&sony_pf_device->dev, &item->devattr); } sony_pf_remove(); sony_laptop_remove_input(); sony_nc_rfkill_cleanup(); dprintk(SONY_NC_DRIVER_NAME " removed.\n"); return 0; } static const struct acpi_device_id sony_device_ids[] = { {SONY_NC_HID, 0}, {SONY_PIC_HID, 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, sony_device_ids); static const struct acpi_device_id sony_nc_device_ids[] = { {SONY_NC_HID, 0}, {"", 0}, }; static struct acpi_driver sony_nc_driver = { .name = SONY_NC_DRIVER_NAME, .class = SONY_NC_CLASS, .ids = sony_nc_device_ids, .owner = THIS_MODULE, .ops = { .add = sony_nc_add, .remove = sony_nc_remove, .resume = sony_nc_resume, .notify = sony_nc_notify, }, }; /*********** SPIC (SNY6001) Device ***********/ #define SONYPI_DEVICE_TYPE1 0x00000001 #define SONYPI_DEVICE_TYPE2 0x00000002 #define SONYPI_DEVICE_TYPE3 0x00000004 #define SONYPI_DEVICE_TYPE4 0x00000008 #define SONYPI_TYPE1_OFFSET 0x04 #define SONYPI_TYPE2_OFFSET 0x12 #define SONYPI_TYPE3_OFFSET 0x12 struct sony_pic_ioport { struct acpi_resource_io io1; struct acpi_resource_io io2; struct list_head list; }; struct sony_pic_irq { struct acpi_resource_irq irq; struct list_head list; }; struct sonypi_eventtypes { u8 data; unsigned long 
mask; struct sonypi_event *events; }; struct sony_pic_dev { struct acpi_device *acpi_dev; struct sony_pic_irq *cur_irq; struct sony_pic_ioport *cur_ioport; struct list_head interrupts; struct list_head ioports; struct mutex lock; struct sonypi_eventtypes *event_types; int (*handle_irq)(const u8, const u8); int model; u16 evport_offset; u8 camera_power; u8 bluetooth_power; u8 wwan_power; }; static struct sony_pic_dev spic_dev = { .interrupts = LIST_HEAD_INIT(spic_dev.interrupts), .ioports = LIST_HEAD_INIT(spic_dev.ioports), }; static int spic_drv_registered; /* Event masks */ #define SONYPI_JOGGER_MASK 0x00000001 #define SONYPI_CAPTURE_MASK 0x00000002 #define SONYPI_FNKEY_MASK 0x00000004 #define SONYPI_BLUETOOTH_MASK 0x00000008 #define SONYPI_PKEY_MASK 0x00000010 #define SONYPI_BACK_MASK 0x00000020 #define SONYPI_HELP_MASK 0x00000040 #define SONYPI_LID_MASK 0x00000080 #define SONYPI_ZOOM_MASK 0x00000100 #define SONYPI_THUMBPHRASE_MASK 0x00000200 #define SONYPI_MEYE_MASK 0x00000400 #define SONYPI_MEMORYSTICK_MASK 0x00000800 #define SONYPI_BATTERY_MASK 0x00001000 #define SONYPI_WIRELESS_MASK 0x00002000 struct sonypi_event { u8 data; u8 event; }; /* The set of possible button release events */ static struct sonypi_event sonypi_releaseev[] = { { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED }, { 0, 0 } }; /* The set of possible jogger events */ static struct sonypi_event sonypi_joggerev[] = { { 0x1f, SONYPI_EVENT_JOGDIAL_UP }, { 0x01, SONYPI_EVENT_JOGDIAL_DOWN }, { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED }, { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED }, { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP }, { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN }, { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED }, { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED }, { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP }, { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN }, { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED }, { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED }, { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED }, { 0, 0 } }; /* The set of 
possible capture button events */ static struct sonypi_event sonypi_captureev[] = { { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED }, { 0x07, SONYPI_EVENT_CAPTURE_PRESSED }, { 0x40, SONYPI_EVENT_CAPTURE_PRESSED }, { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED }, { 0, 0 } }; /* The set of possible fnkeys events */ static struct sonypi_event sonypi_fnkeyev[] = { { 0x10, SONYPI_EVENT_FNKEY_ESC }, { 0x11, SONYPI_EVENT_FNKEY_F1 }, { 0x12, SONYPI_EVENT_FNKEY_F2 }, { 0x13, SONYPI_EVENT_FNKEY_F3 }, { 0x14, SONYPI_EVENT_FNKEY_F4 }, { 0x15, SONYPI_EVENT_FNKEY_F5 }, { 0x16, SONYPI_EVENT_FNKEY_F6 }, { 0x17, SONYPI_EVENT_FNKEY_F7 }, { 0x18, SONYPI_EVENT_FNKEY_F8 }, { 0x19, SONYPI_EVENT_FNKEY_F9 }, { 0x1a, SONYPI_EVENT_FNKEY_F10 }, { 0x1b, SONYPI_EVENT_FNKEY_F11 }, { 0x1c, SONYPI_EVENT_FNKEY_F12 }, { 0x1f, SONYPI_EVENT_FNKEY_RELEASED }, { 0x21, SONYPI_EVENT_FNKEY_1 }, { 0x22, SONYPI_EVENT_FNKEY_2 }, { 0x31, SONYPI_EVENT_FNKEY_D }, { 0x32, SONYPI_EVENT_FNKEY_E }, { 0x33, SONYPI_EVENT_FNKEY_F }, { 0x34, SONYPI_EVENT_FNKEY_S }, { 0x35, SONYPI_EVENT_FNKEY_B }, { 0x36, SONYPI_EVENT_FNKEY_ONLY }, { 0, 0 } }; /* The set of possible program key events */ static struct sonypi_event sonypi_pkeyev[] = { { 0x01, SONYPI_EVENT_PKEY_P1 }, { 0x02, SONYPI_EVENT_PKEY_P2 }, { 0x04, SONYPI_EVENT_PKEY_P3 }, { 0x20, SONYPI_EVENT_PKEY_P1 }, { 0, 0 } }; /* The set of possible bluetooth events */ static struct sonypi_event sonypi_blueev[] = { { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED }, { 0x59, SONYPI_EVENT_BLUETOOTH_ON }, { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF }, { 0, 0 } }; /* The set of possible wireless events */ static struct sonypi_event sonypi_wlessev[] = { { 0x59, SONYPI_EVENT_WIRELESS_ON }, { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, { 0, 0 } }; /* The set of possible back button events */ static struct sonypi_event sonypi_backev[] = { { 0x20, SONYPI_EVENT_BACK_PRESSED }, { 0, 0 } }; /* The set of possible help button events */ static struct sonypi_event sonypi_helpev[] = { { 0x3b, SONYPI_EVENT_HELP_PRESSED }, { 
0, 0 }
};

/* The set of possible lid events */
static struct sonypi_event sonypi_lidev[] = {
	{ 0x51, SONYPI_EVENT_LID_CLOSED },
	{ 0x50, SONYPI_EVENT_LID_OPENED },
	{ 0, 0 }
};

/* The set of possible zoom events */
static struct sonypi_event sonypi_zoomev[] = {
	{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
	{ 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED },
	{ 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED },
	{ 0x04, SONYPI_EVENT_ZOOM_PRESSED },
	{ 0, 0 }
};

/* The set of possible thumbphrase events */
static struct sonypi_event sonypi_thumbphraseev[] = {
	{ 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
	{ 0, 0 }
};

/* The set of possible motioneye camera events */
static struct sonypi_event sonypi_meyeev[] = {
	{ 0x00, SONYPI_EVENT_MEYE_FACE },
	{ 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
	{ 0, 0 }
};

/* The set of possible memorystick events */
static struct sonypi_event sonypi_memorystickev[] = {
	{ 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
	{ 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
	{ 0, 0 }
};

/* The set of possible battery events */
static struct sonypi_event sonypi_batteryev[] = {
	{ 0x20, SONYPI_EVENT_BATTERY_INSERT },
	{ 0x30, SONYPI_EVENT_BATTERY_REMOVE },
	{ 0, 0 }
};

/* The set of possible volume events */
static struct sonypi_event sonypi_volumeev[] = {
	{ 0x01, SONYPI_EVENT_VOLUME_INC_PRESSED },
	{ 0x02, SONYPI_EVENT_VOLUME_DEC_PRESSED },
	{ 0, 0 }
};

/* The set of possible brightness events */
static struct sonypi_event sonypi_brightnessev[] = {
	{ 0x80, SONYPI_EVENT_BRIGHTNESS_PRESSED },
	{ 0, 0 }
};

/*
 * Type1 model decode table.  In sony_pic_irq() the .data field is
 * AND-matched against the secondary status byte read from the event
 * port, and .mask is checked against the module-wide 'mask' parameter
 * before the per-table event codes are searched.  The first entry's
 * all-ones mask makes button-release events unconditional.
 */
static struct sonypi_eventtypes type1_events[] = {
	{ 0, 0xffffffff, sonypi_releaseev },
	{ 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
	{ 0x30, SONYPI_LID_MASK, sonypi_lidev },
	{ 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
	{ 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
	{ 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ 0 },
};
/* Type2 model decode table (see type1_events for matching rules). */
static struct sonypi_eventtypes type2_events[] = {
	{ 0, 0xffffffff, sonypi_releaseev },
	{ 0x38, SONYPI_LID_MASK, sonypi_lidev },
	{ 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
	{ 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
	{ 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x11, SONYPI_BACK_MASK, sonypi_backev },
	{ 0x21, SONYPI_HELP_MASK, sonypi_helpev },
	{ 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
	{ 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0 },
};

/*
 * Type3 model decode table.  NOTE(review): several entries share
 * data_mask 0x05 under SONYPI_PKEY_MASK (pkeyev, volumeev,
 * brightnessev); sony_pic_irq() stops at the first event-code match,
 * so ordering here is significant — confirm before reordering.
 */
static struct sonypi_eventtypes type3_events[] = {
	{ 0, 0xffffffff, sonypi_releaseev },
	{ 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
	{ 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
	{ 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
	{ 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
	{ 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev },
	{ 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev },
	{ 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev },
	{ 0x05, SONYPI_PKEY_MASK, sonypi_volumeev },
	{ 0x05, SONYPI_PKEY_MASK, sonypi_brightnessev },
	{ 0 },
};

/* low level spic calls */
#define ITERATIONS_LONG		10000
#define ITERATIONS_SHORT	10

/*
 * Busy-wait (1us per iteration, at most 'iterations' loops) until
 * 'command' evaluates false; if the loop counter runs out first the
 * failure is only logged, the caller proceeds regardless.
 */
#define wait_on_command(command, iterations) {				\
	unsigned int n = iterations;					\
	while (--n && (command))					\
		udelay(1);						\
	if (!n)								\
		dprintk("command failed at %s : %s (line %d)\n",	\
				__FILE__, __func__, __LINE__);		\
}

/*
 * One-byte SPIC transaction: wait for the busy bit (bit 1 of port
 * io1+4) to clear, write the command byte to io1+4, then read back
 * the two result bytes; the high byte (from io1) is returned.
 */
static u8 sony_pic_call1(u8 dev)
{
	u8 v1, v2;

	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
			ITERATIONS_LONG);
	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
	v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
	v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
	dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1);
	return v2;
}

/*
 * Two-byte SPIC transaction: command byte to io1+4, function byte to
 * io1, each write gated on the busy bit; returns the byte read back
 * from io1.
 */
static u8 sony_pic_call2(u8 dev, u8 fn)
{
	u8 v1;
	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
			ITERATIONS_LONG);
	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
			ITERATIONS_LONG);
	outb(fn, spic_dev.cur_ioport->io1.minimum);
	v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
	dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1);
	return v1;
}

/*
 * Three-byte SPIC transaction: command byte to io1+4, then function
 * and value bytes to io1, each write gated on the busy bit (bit 1 of
 * io1+4); returns the byte read back from io1.
 */
static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
{
	u8 v1;

	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
			ITERATIONS_LONG);
	outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
			ITERATIONS_LONG);
	outb(fn, spic_dev.cur_ioport->io1.minimum);
	wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
			ITERATIONS_LONG);
	outb(v, spic_dev.cur_ioport->io1.minimum);
	v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
	dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
			dev, fn, v, v1);
	return v1;
}

/*
 * minidrivers for SPIC models
 */

/*
 * Type3 per-model IRQ hook.  Returns 0 when the event was consumed
 * here (the follow-up call will trigger a fresh irq carrying the real
 * data); returns 1 to let sony_pic_irq() report the event normally.
 */
static int type3_handle_irq(const u8 data_mask, const u8 ev)
{
	/*
	 * 0x31 could mean we have to take some extra action and wait for
	 * the next irq for some Type3 models, it will generate a new
	 * irq and we can read new data from the device:
	 *  - 0x5c and 0x5f requires 0xA0
	 *  - 0x61 requires 0xB3
	 */
	if (data_mask == 0x31) {
		if (ev == 0x5c || ev == 0x5f)
			sony_pic_call1(0xA0);
		else if (ev == 0x61)
			sony_pic_call1(0xB3);
		return 0;
	}
	return 1;
}

/*
 * Classify the SPIC model by probing for known Intel south-bridge PCI
 * devices; fills in model, event-port offset, decode table and (for
 * Type3) the per-model irq hook on 'dev'.
 */
static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
{
	struct pci_dev *pcidev;

	/* PIIX4 ACPI bridge => Type1 */
	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
	if (pcidev) {
		dev->model = SONYPI_DEVICE_TYPE1;
		dev->evport_offset = SONYPI_TYPE1_OFFSET;
		dev->event_types = type1_events;
		goto out;
	}

	/* ICH6 => Type2 */
	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
	if (pcidev) {
		dev->model = SONYPI_DEVICE_TYPE2;
		dev->evport_offset = SONYPI_TYPE2_OFFSET;
		dev->event_types = type2_events;
		goto out;
	}

	pcidev =
pci_get_device(PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
	/* ICH7 => Type3 */
	if (pcidev) {
		dev->model = SONYPI_DEVICE_TYPE3;
		dev->handle_irq = type3_handle_irq;
		dev->evport_offset = SONYPI_TYPE3_OFFSET;
		dev->event_types = type3_events;
		goto out;
	}

	/* ICH8 => Type3 */
	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
	if (pcidev) {
		dev->model = SONYPI_DEVICE_TYPE3;
		dev->handle_irq = type3_handle_irq;
		dev->evport_offset = SONYPI_TYPE3_OFFSET;
		dev->event_types = type3_events;
		goto out;
	}

	/* ICH9 => Type3 */
	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_ICH9_1, NULL);
	if (pcidev) {
		dev->model = SONYPI_DEVICE_TYPE3;
		dev->handle_irq = type3_handle_irq;
		dev->evport_offset = SONYPI_TYPE3_OFFSET;
		dev->event_types = type3_events;
		goto out;
	}

	/* default */
	dev->model = SONYPI_DEVICE_TYPE2;
	dev->evport_offset = SONYPI_TYPE2_OFFSET;
	dev->event_types = type2_events;

out:
	/* drop the reference taken by pci_get_device(); NULL on the
	 * default path, so the check is required */
	if (pcidev)
		pci_dev_put(pcidev);

	printk(KERN_INFO DRV_PFX "detected Type%d model\n",
			dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
			dev->model == SONYPI_DEVICE_TYPE2 ?
2 : 3); } /* camera tests and poweron/poweroff */ #define SONYPI_CAMERA_PICTURE 5 #define SONYPI_CAMERA_CONTROL 0x10 #define SONYPI_CAMERA_BRIGHTNESS 0 #define SONYPI_CAMERA_CONTRAST 1 #define SONYPI_CAMERA_HUE 2 #define SONYPI_CAMERA_COLOR 3 #define SONYPI_CAMERA_SHARPNESS 4 #define SONYPI_CAMERA_EXPOSURE_MASK 0xC #define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3 #define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30 #define SONYPI_CAMERA_MUTE_MASK 0x40 /* the rest don't need a loop until not 0xff */ #define SONYPI_CAMERA_AGC 6 #define SONYPI_CAMERA_AGC_MASK 0x30 #define SONYPI_CAMERA_SHUTTER_MASK 0x7 #define SONYPI_CAMERA_SHUTDOWN_REQUEST 7 #define SONYPI_CAMERA_CONTROL 0x10 #define SONYPI_CAMERA_STATUS 7 #define SONYPI_CAMERA_STATUS_READY 0x2 #define SONYPI_CAMERA_STATUS_POSITION 0x4 #define SONYPI_DIRECTION_BACKWARDS 0x4 #define SONYPI_CAMERA_REVISION 8 #define SONYPI_CAMERA_ROMVERSION 9 static int __sony_pic_camera_ready(void) { u8 v; v = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS); return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY)); } static int __sony_pic_camera_off(void) { if (!camera) { printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); return -ENODEV; } wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK), ITERATIONS_SHORT); if (spic_dev.camera_power) { sony_pic_call2(0x91, 0); spic_dev.camera_power = 0; } return 0; } static int __sony_pic_camera_on(void) { int i, j, x; if (!camera) { printk(KERN_WARNING DRV_PFX "camera control not enabled\n"); return -ENODEV; } if (spic_dev.camera_power) return 0; for (j = 5; j > 0; j--) { for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++) msleep(10); sony_pic_call1(0x93); for (i = 400; i > 0; i--) { if (__sony_pic_camera_ready()) break; msleep(10); } if (i) break; } if (j == 0) { printk(KERN_WARNING DRV_PFX "failed to power on camera\n"); return -ENODEV; } wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL, 0x5a), ITERATIONS_SHORT); spic_dev.camera_power = 1; return 0; } /* 
External camera command (exported to the motion eye v4l driver) */ int sony_pic_camera_command(int command, u8 value) { if (!camera) return -EIO; mutex_lock(&spic_dev.lock); switch (command) { case SONY_PIC_COMMAND_SETCAMERA: if (value) __sony_pic_camera_on(); else __sony_pic_camera_off(); break; case SONY_PIC_COMMAND_SETCAMERABRIGHTNESS: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_BRIGHTNESS, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERACONTRAST: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTRAST, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERAHUE: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_HUE, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERACOLOR: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_COLOR, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERASHARPNESS: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_SHARPNESS, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERAPICTURE: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, value), ITERATIONS_SHORT); break; case SONY_PIC_COMMAND_SETCAMERAAGC: wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_AGC, value), ITERATIONS_SHORT); break; default: printk(KERN_ERR DRV_PFX "sony_pic_camera_command invalid: %d\n", command); break; } mutex_unlock(&spic_dev.lock); return 0; } EXPORT_SYMBOL(sony_pic_camera_command); /* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */ static void __sony_pic_set_wwanpower(u8 state) { state = !!state; if (spic_dev.wwan_power == state) return; sony_pic_call2(0xB0, state); sony_pic_call1(0x82); spic_dev.wwan_power = state; } static ssize_t sony_pic_wwanpower_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { unsigned long value; if (count > 31) return -EINVAL; value = simple_strtoul(buffer, NULL, 10); mutex_lock(&spic_dev.lock); __sony_pic_set_wwanpower(value); mutex_unlock(&spic_dev.lock); return count; } static 
ssize_t sony_pic_wwanpower_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t count;

	/* wwan_power is cached state, read under the device mutex */
	mutex_lock(&spic_dev.lock);
	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.wwan_power);
	mutex_unlock(&spic_dev.lock);
	return count;
}

/* bluetooth subsystem power state */

/* Toggle bluetooth power via SPIC (0x96/0x82), caching the state so
 * redundant writes are skipped.  Caller holds spic_dev.lock. */
static void __sony_pic_set_bluetoothpower(u8 state)
{
	state = !!state;
	if (spic_dev.bluetooth_power == state)
		return;
	sony_pic_call2(0x96, state);
	sony_pic_call1(0x82);
	spic_dev.bluetooth_power = state;
}

static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long value;

	if (count > 31)
		return -EINVAL;
	/* NOTE(review): simple_strtoul() does no error checking, so
	 * malformed input is silently treated as 0 — confirm whether
	 * strict parsing is wanted here */
	value = simple_strtoul(buffer, NULL, 10);
	mutex_lock(&spic_dev.lock);
	__sony_pic_set_bluetoothpower(value);
	mutex_unlock(&spic_dev.lock);
	return count;
}

static ssize_t sony_pic_bluetoothpower_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;

	mutex_lock(&spic_dev.lock);
	count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.bluetooth_power);
	mutex_unlock(&spic_dev.lock);
	return count;
}

/* fan speed */
/* FAN0 information (reverse engineered from ACPI tables) */
#define SONY_PIC_FAN0_STATUS	0x93

/* Fan speed lives in an EC register; plain ec_read/ec_write, no
 * locking needed beyond what the EC layer provides. */
static int sony_pic_set_fanspeed(unsigned long value)
{
	return ec_write(SONY_PIC_FAN0_STATUS, value);
}

static int sony_pic_get_fanspeed(u8 *value)
{
	return ec_read(SONY_PIC_FAN0_STATUS, value);
}

static ssize_t sony_pic_fanspeed_store(struct device *dev,
		struct device_attribute *attr,
		const char *buffer, size_t count)
{
	unsigned long value;

	if (count > 31)
		return -EINVAL;
	/* see NOTE(review) above re: simple_strtoul */
	value = simple_strtoul(buffer, NULL, 10);
	if (sony_pic_set_fanspeed(value))
		return -EIO;
	return count;
}

static ssize_t sony_pic_fanspeed_show(struct device *dev,
		struct device_attribute *attr, char *buffer)
{
	u8 value = 0;

	if (sony_pic_get_fanspeed(&value))
		return -EIO;
	return snprintf(buffer, PAGE_SIZE, "%d\n", value);
}

/* Declare a rw device attribute wired to the sony_pic_<name>_show /
 * sony_pic_<name>_store pair defined above. */
#define SPIC_ATTR(_name, _mode)					\
struct device_attribute spic_attr_##_name = __ATTR(_name,	\
		_mode, sony_pic_## _name ##_show,		\
		sony_pic_## _name ##_store)

static SPIC_ATTR(bluetoothpower, 0644);
static SPIC_ATTR(wwanpower, 0644);
static SPIC_ATTR(fanspeed, 0644);

static struct attribute *spic_attributes[] = {
	&spic_attr_bluetoothpower.attr,
	&spic_attr_wwanpower.attr,
	&spic_attr_fanspeed.attr,
	NULL
};

static struct attribute_group spic_attribute_group = {
	.attrs = spic_attributes
};

/******** SONYPI compatibility **********/
#ifdef CONFIG_SONYPI_COMPAT

/* battery / brightness / temperature  addresses */
#define SONYPI_BAT_FLAGS	0x81
#define SONYPI_LCD_LIGHT	0x96
#define SONYPI_BAT1_PCTRM	0xa0
#define SONYPI_BAT1_LEFT	0xa2
#define SONYPI_BAT1_MAXRT	0xa4
#define SONYPI_BAT2_PCTRM	0xa8
#define SONYPI_BAT2_LEFT	0xaa
#define SONYPI_BAT2_MAXRT	0xac
#define SONYPI_BAT1_MAXTK	0xb0
#define SONYPI_BAT1_FULL	0xb2
#define SONYPI_BAT2_MAXTK	0xb8
#define SONYPI_BAT2_FULL	0xba
#define SONYPI_TEMP_STATUS	0xC1

/* State backing the legacy /dev/sonypi misc device: events are queued
 * into 'fifo' by the irq path and drained by read(). */
struct sonypi_compat_s {
	struct fasync_struct	*fifo_async;
	struct kfifo		*fifo;
	spinlock_t		fifo_lock;
	wait_queue_head_t	fifo_proc_list;
	atomic_t		open_count;	/* concurrent opens of /dev/sonypi */
};
static struct sonypi_compat_s sonypi_compat = {
	.open_count = ATOMIC_INIT(0),
};

static int sonypi_misc_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &sonypi_compat.fifo_async);
}

static int sonypi_misc_release(struct inode *inode, struct file *file)
{
	atomic_dec(&sonypi_compat.open_count);
	return 0;
}

static int sonypi_misc_open(struct inode *inode, struct file *file)
{
	/* Flush input queue on first open */
	unsigned long flags;

	/* the old kfifo API stores its spinlock in fifo->lock */
	spin_lock_irqsave(sonypi_compat.fifo->lock, flags);

	if (atomic_inc_return(&sonypi_compat.open_count) == 1)
		__kfifo_reset(sonypi_compat.fifo);

	spin_unlock_irqrestore(sonypi_compat.fifo->lock, flags);

	return 0;
}

/* Blocking read of queued event bytes; honours O_NONBLOCK. */
static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
				size_t count, loff_t *pos)
{
	ssize_t ret;
	unsigned char c;

	if ((kfifo_len(sonypi_compat.fifo) == 0) &&
	    (file->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ret =
wait_event_interruptible(sonypi_compat.fifo_proc_list, kfifo_len(sonypi_compat.fifo) != 0); if (ret) return ret; while (ret < count && (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) { if (put_user(c, buf++)) return -EFAULT; ret++; } if (ret > 0) { struct inode *inode = file->f_path.dentry->d_inode; inode->i_atime = current_fs_time(inode->i_sb); } return ret; } static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) { poll_wait(file, &sonypi_compat.fifo_proc_list, wait); if (kfifo_len(sonypi_compat.fifo)) return POLLIN | POLLRDNORM; return 0; } static int ec_read16(u8 addr, u16 *value) { u8 val_lb, val_hb; if (ec_read(addr, &val_lb)) return -1; if (ec_read(addr + 1, &val_hb)) return -1; *value = val_lb | (val_hb << 8); return 0; } static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { int ret = 0; void __user *argp = (void __user *)arg; u8 val8; u16 val16; int value; mutex_lock(&spic_dev.lock); switch (cmd) { case SONYPI_IOCGBRT: if (sony_backlight_device == NULL) { ret = -EIO; break; } if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) { ret = -EIO; break; } val8 = ((value & 0xff) - 1) << 5; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSBRT: if (sony_backlight_device == NULL) { ret = -EIO; break; } if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", (val8 >> 5) + 1, NULL)) { ret = -EIO; break; } /* sync the backlight device status */ sony_backlight_device->props.brightness = sony_backlight_get_brightness(sony_backlight_device); break; case SONYPI_IOCGBAT1CAP: if (ec_read16(SONYPI_BAT1_FULL, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT1REM: if (ec_read16(SONYPI_BAT1_LEFT, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT2CAP: if 
(ec_read16(SONYPI_BAT2_FULL, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBAT2REM: if (ec_read16(SONYPI_BAT2_LEFT, &val16)) { ret = -EIO; break; } if (copy_to_user(argp, &val16, sizeof(val16))) ret = -EFAULT; break; case SONYPI_IOCGBATFLAGS: if (ec_read(SONYPI_BAT_FLAGS, &val8)) { ret = -EIO; break; } val8 &= 0x07; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCGBLUE: val8 = spic_dev.bluetooth_power; if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSBLUE: if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } __sony_pic_set_bluetoothpower(val8); break; /* FAN Controls */ case SONYPI_IOCGFAN: if (sony_pic_get_fanspeed(&val8)) { ret = -EIO; break; } if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; case SONYPI_IOCSFAN: if (copy_from_user(&val8, argp, sizeof(val8))) { ret = -EFAULT; break; } if (sony_pic_set_fanspeed(val8)) ret = -EIO; break; /* GET Temperature (useful under APM) */ case SONYPI_IOCGTEMP: if (ec_read(SONYPI_TEMP_STATUS, &val8)) { ret = -EIO; break; } if (copy_to_user(argp, &val8, sizeof(val8))) ret = -EFAULT; break; default: ret = -EINVAL; } mutex_unlock(&spic_dev.lock); return ret; } static const struct file_operations sonypi_misc_fops = { .owner = THIS_MODULE, .read = sonypi_misc_read, .poll = sonypi_misc_poll, .open = sonypi_misc_open, .release = sonypi_misc_release, .fasync = sonypi_misc_fasync, .unlocked_ioctl = sonypi_misc_ioctl, }; static struct miscdevice sonypi_misc_device = { .minor = MISC_DYNAMIC_MINOR, .name = "sonypi", .fops = &sonypi_misc_fops, }; static void sonypi_compat_report_event(u8 event) { kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event)); kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN); wake_up_interruptible(&sonypi_compat.fifo_proc_list); } static int sonypi_compat_init(void) { int error; spin_lock_init(&sonypi_compat.fifo_lock); 
sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
					 &sonypi_compat.fifo_lock);
	/* old kfifo API: kfifo_alloc returns the fifo or ERR_PTR */
	if (IS_ERR(sonypi_compat.fifo)) {
		printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
		return PTR_ERR(sonypi_compat.fifo);
	}

	init_waitqueue_head(&sonypi_compat.fifo_proc_list);

	/* honour the module 'minor' parameter if one was given */
	if (minor != -1)
		sonypi_misc_device.minor = minor;
	error = misc_register(&sonypi_misc_device);
	if (error) {
		printk(KERN_ERR DRV_PFX "misc_register failed\n");
		goto err_free_kfifo;
	}
	if (minor == -1)
		printk(KERN_INFO DRV_PFX "device allocated minor is %d\n",
		       sonypi_misc_device.minor);

	return 0;

err_free_kfifo:
	kfifo_free(sonypi_compat.fifo);
	return error;
}

static void sonypi_compat_exit(void)
{
	misc_deregister(&sonypi_misc_device);
	kfifo_free(sonypi_compat.fifo);
}
#else
/* no-op stubs when the sonypi compatibility layer is configured out */
static int sonypi_compat_init(void) { return 0; }
static void sonypi_compat_exit(void) { }
static void sonypi_compat_report_event(u8 event) { }
#endif /* CONFIG_SONYPI_COMPAT */

/*
 * ACPI callbacks
 */

/*
 * acpi_walk_resources() callback for _PRS: collects candidate ioport
 * pairs and irqs into the lists on the sony_pic_dev passed as
 * 'context'.  A START_DEPENDENT resource opens a new ioport entry;
 * subsequent IO resources fill its io1/io2 slots.
 */
static acpi_status
sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
{
	u32 i;
	struct sony_pic_dev *dev = (struct sony_pic_dev *)context;

	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_START_DEPENDENT:
		{
			/* start IO enumeration */
			struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
			if (!ioport)
				return AE_ERROR;

			list_add(&ioport->list, &dev->ioports);
			return AE_OK;
		}

	case ACPI_RESOURCE_TYPE_END_DEPENDENT:
		/* end IO enumeration */
		return AE_OK;

	case ACPI_RESOURCE_TYPE_IRQ:
		{
			struct acpi_resource_irq *p = &resource->data.irq;
			struct sony_pic_irq *interrupt = NULL;
			if (!p || !p->interrupt_count) {
				/*
				 * IRQ descriptors may have no IRQ# bits set,
				 * particularly those w/ _STA disabled
				 */
				dprintk("Blank IRQ resource\n");
				return AE_OK;
			}
			for (i = 0; i < p->interrupt_count; i++) {
				if (!p->interrupts[i]) {
					printk(KERN_WARNING DRV_PFX
							"Invalid IRQ %d\n",
							p->interrupts[i]);
					continue;
				}
				interrupt = kzalloc(sizeof(*interrupt),
						GFP_KERNEL);
				if (!interrupt)
					return AE_ERROR;

				list_add(&interrupt->list,
&dev->interrupts); interrupt->irq.triggering = p->triggering; interrupt->irq.polarity = p->polarity; interrupt->irq.sharable = p->sharable; interrupt->irq.interrupt_count = 1; interrupt->irq.interrupts[0] = p->interrupts[i]; } return AE_OK; } case ACPI_RESOURCE_TYPE_IO: { struct acpi_resource_io *io = &resource->data.io; struct sony_pic_ioport *ioport = list_first_entry(&dev->ioports, struct sony_pic_ioport, list); if (!io) { dprintk("Blank IO resource\n"); return AE_OK; } if (!ioport->io1.minimum) { memcpy(&ioport->io1, io, sizeof(*io)); dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum, ioport->io1.address_length); } else if (!ioport->io2.minimum) { memcpy(&ioport->io2, io, sizeof(*io)); dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum, ioport->io2.address_length); } else { printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n"); return AE_ERROR; } return AE_OK; } default: dprintk("Resource %d isn't an IRQ nor an IO port\n", resource->type); case ACPI_RESOURCE_TYPE_END_TAG: return AE_OK; } return AE_CTRL_TERMINATE; } static int sony_pic_possible_resources(struct acpi_device *device) { int result = 0; acpi_status status = AE_OK; if (!device) return -EINVAL; /* get device status */ /* see acpi_pci_link_get_current acpi_pci_link_get_possible */ dprintk("Evaluating _STA\n"); result = acpi_bus_get_status(device); if (result) { printk(KERN_WARNING DRV_PFX "Unable to read status\n"); goto end; } if (!device->status.enabled) dprintk("Device disabled\n"); else dprintk("Device enabled\n"); /* * Query and parse 'method' */ dprintk("Evaluating %s\n", METHOD_NAME__PRS); status = acpi_walk_resources(device->handle, METHOD_NAME__PRS, sony_pic_read_possible_resource, &spic_dev); if (ACPI_FAILURE(status)) { printk(KERN_WARNING DRV_PFX "Failure evaluating %s\n", METHOD_NAME__PRS); result = -ENODEV; } end: return result; } /* * Disable the spic device by calling its _DIS method */ static int sony_pic_disable(struct acpi_device *device) { acpi_status ret = 
acpi_evaluate_object(device->handle, "_DIS", NULL, NULL); if (ACPI_FAILURE(ret) && ret != AE_NOT_FOUND) return -ENXIO; dprintk("Device disabled\n"); return 0; } /* * Based on drivers/acpi/pci_link.c:acpi_pci_link_set * * Call _SRS to set current resources */ static int sony_pic_enable(struct acpi_device *device, struct sony_pic_ioport *ioport, struct sony_pic_irq *irq) { acpi_status status; int result = 0; /* Type 1 resource layout is: * IO * IO * IRQNoFlags * End * * Type 2 and 3 resource layout is: * IO * IRQNoFlags * End */ struct { struct acpi_resource res1; struct acpi_resource res2; struct acpi_resource res3; struct acpi_resource res4; } *resource; struct acpi_buffer buffer = { 0, NULL }; if (!ioport || !irq) return -EINVAL; /* init acpi_buffer */ resource = kzalloc(sizeof(*resource) + 1, GFP_KERNEL); if (!resource) return -ENOMEM; buffer.length = sizeof(*resource) + 1; buffer.pointer = resource; /* setup Type 1 resources */ if (spic_dev.model == SONYPI_DEVICE_TYPE1) { /* setup io resources */ resource->res1.type = ACPI_RESOURCE_TYPE_IO; resource->res1.length = sizeof(struct acpi_resource); memcpy(&resource->res1.data.io, &ioport->io1, sizeof(struct acpi_resource_io)); resource->res2.type = ACPI_RESOURCE_TYPE_IO; resource->res2.length = sizeof(struct acpi_resource); memcpy(&resource->res2.data.io, &ioport->io2, sizeof(struct acpi_resource_io)); /* setup irq resource */ resource->res3.type = ACPI_RESOURCE_TYPE_IRQ; resource->res3.length = sizeof(struct acpi_resource); memcpy(&resource->res3.data.irq, &irq->irq, sizeof(struct acpi_resource_irq)); /* we requested a shared irq */ resource->res3.data.irq.sharable = ACPI_SHARED; resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG; } /* setup Type 2/3 resources */ else { /* setup io resource */ resource->res1.type = ACPI_RESOURCE_TYPE_IO; resource->res1.length = sizeof(struct acpi_resource); memcpy(&resource->res1.data.io, &ioport->io1, sizeof(struct acpi_resource_io)); /* setup irq resource */ resource->res2.type = 
ACPI_RESOURCE_TYPE_IRQ; resource->res2.length = sizeof(struct acpi_resource); memcpy(&resource->res2.data.irq, &irq->irq, sizeof(struct acpi_resource_irq)); /* we requested a shared irq */ resource->res2.data.irq.sharable = ACPI_SHARED; resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG; } /* Attempt to set the resource */ dprintk("Evaluating _SRS\n"); status = acpi_set_current_resources(device->handle, &buffer); /* check for total failure */ if (ACPI_FAILURE(status)) { printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n"); result = -ENODEV; goto end; } /* Necessary device initializations calls (from sonypi) */ sony_pic_call1(0x82); sony_pic_call2(0x81, 0xff); sony_pic_call1(compat ? 0x92 : 0x82); end: kfree(resource); return result; } /***************** * * ISR: some event is available * *****************/ static irqreturn_t sony_pic_irq(int irq, void *dev_id) { int i, j; u8 ev = 0; u8 data_mask = 0; u8 device_event = 0; struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id; ev = inb_p(dev->cur_ioport->io1.minimum); if (dev->cur_ioport->io2.minimum) data_mask = inb_p(dev->cur_ioport->io2.minimum); else data_mask = inb_p(dev->cur_ioport->io1.minimum + dev->evport_offset); dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, dev->cur_ioport->io1.minimum, dev->evport_offset); if (ev == 0x00 || ev == 0xff) return IRQ_HANDLED; for (i = 0; dev->event_types[i].mask; i++) { if ((data_mask & dev->event_types[i].data) != dev->event_types[i].data) continue; if (!(mask & dev->event_types[i].mask)) continue; for (j = 0; dev->event_types[i].events[j].event; j++) { if (ev == dev->event_types[i].events[j].data) { device_event = dev->event_types[i].events[j].event; goto found; } } } /* Still not able to decode the event try to pass * it over to the minidriver */ if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0) return IRQ_HANDLED; dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, dev->cur_ioport->io1.minimum, 
dev->evport_offset); return IRQ_HANDLED; found: sony_laptop_report_input_event(device_event); acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event); sonypi_compat_report_event(device_event); return IRQ_HANDLED; } /***************** * * ACPI driver * *****************/ static int sony_pic_remove(struct acpi_device *device, int type) { struct sony_pic_ioport *io, *tmp_io; struct sony_pic_irq *irq, *tmp_irq; if (sony_pic_disable(device)) { printk(KERN_ERR DRV_PFX "Couldn't disable device.\n"); return -ENXIO; } free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); release_region(spic_dev.cur_ioport->io1.minimum, spic_dev.cur_ioport->io1.address_length); if (spic_dev.cur_ioport->io2.minimum) release_region(spic_dev.cur_ioport->io2.minimum, spic_dev.cur_ioport->io2.address_length); sonypi_compat_exit(); sony_laptop_remove_input(); /* pf attrs */ sysfs_remove_group(&sony_pf_device->dev.kobj, &spic_attribute_group); sony_pf_remove(); list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) { list_del(&io->list); kfree(io); } list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) { list_del(&irq->list); kfree(irq); } spic_dev.cur_ioport = NULL; spic_dev.cur_irq = NULL; dprintk(SONY_PIC_DRIVER_NAME " removed.\n"); return 0; } static int sony_pic_add(struct acpi_device *device) { int result; struct sony_pic_ioport *io, *tmp_io; struct sony_pic_irq *irq, *tmp_irq; printk(KERN_INFO DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION); spic_dev.acpi_dev = device; strcpy(acpi_device_class(device), "sony/hotkey"); sony_pic_detect_device_type(&spic_dev); mutex_init(&spic_dev.lock); /* read _PRS resources */ result = sony_pic_possible_resources(device); if (result) { printk(KERN_ERR DRV_PFX "Unable to read possible resources.\n"); goto err_free_resources; } /* setup input devices and helper fifo */ result = sony_laptop_setup_input(device); if (result) { printk(KERN_ERR DRV_PFX "Unable to create input devices.\n"); goto 
err_free_resources; } if (sonypi_compat_init()) goto err_remove_input; /* request io port */ list_for_each_entry_reverse(io, &spic_dev.ioports, list) { if (request_region(io->io1.minimum, io->io1.address_length, "Sony Programable I/O Device")) { dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n", io->io1.minimum, io->io1.maximum, io->io1.address_length); /* Type 1 have 2 ioports */ if (io->io2.minimum) { if (request_region(io->io2.minimum, io->io2.address_length, "Sony Programable I/O Device")) { dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n", io->io2.minimum, io->io2.maximum, io->io2.address_length); spic_dev.cur_ioport = io; break; } else { dprintk("Unable to get I/O port2: " "0x%.4x (0x%.4x) + 0x%.2x\n", io->io2.minimum, io->io2.maximum, io->io2.address_length); release_region(io->io1.minimum, io->io1.address_length); } } else { spic_dev.cur_ioport = io; break; } } } if (!spic_dev.cur_ioport) { printk(KERN_ERR DRV_PFX "Failed to request_region.\n"); result = -ENODEV; goto err_remove_compat; } /* request IRQ */ list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, IRQF_DISABLED, "sony-laptop", &spic_dev)) { dprintk("IRQ: %d - triggering: %d - " "polarity: %d - shr: %d\n", irq->irq.interrupts[0], irq->irq.triggering, irq->irq.polarity, irq->irq.sharable); spic_dev.cur_irq = irq; break; } } if (!spic_dev.cur_irq) { printk(KERN_ERR DRV_PFX "Failed to request_irq.\n"); result = -ENODEV; goto err_release_region; } /* set resource status _SRS */ result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); if (result) { printk(KERN_ERR DRV_PFX "Couldn't enable device.\n"); goto err_free_irq; } spic_dev.bluetooth_power = -1; /* create device attributes */ result = sony_pf_add(); if (result) goto err_disable_device; result = sysfs_create_group(&sony_pf_device->dev.kobj, &spic_attribute_group); if (result) goto err_remove_pf; return 0; err_remove_pf: sony_pf_remove(); err_disable_device: 
sony_pic_disable(device); err_free_irq: free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev); err_release_region: release_region(spic_dev.cur_ioport->io1.minimum, spic_dev.cur_ioport->io1.address_length); if (spic_dev.cur_ioport->io2.minimum) release_region(spic_dev.cur_ioport->io2.minimum, spic_dev.cur_ioport->io2.address_length); err_remove_compat: sonypi_compat_exit(); err_remove_input: sony_laptop_remove_input(); err_free_resources: list_for_each_entry_safe(io, tmp_io, &spic_dev.ioports, list) { list_del(&io->list); kfree(io); } list_for_each_entry_safe(irq, tmp_irq, &spic_dev.interrupts, list) { list_del(&irq->list); kfree(irq); } spic_dev.cur_ioport = NULL; spic_dev.cur_irq = NULL; return result; } static int sony_pic_suspend(struct acpi_device *device, pm_message_t state) { if (sony_pic_disable(device)) return -ENXIO; return 0; } static int sony_pic_resume(struct acpi_device *device) { sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq); return 0; } static const struct acpi_device_id sony_pic_device_ids[] = { {SONY_PIC_HID, 0}, {"", 0}, }; static struct acpi_driver sony_pic_driver = { .name = SONY_PIC_DRIVER_NAME, .class = SONY_PIC_CLASS, .ids = sony_pic_device_ids, .owner = THIS_MODULE, .ops = { .add = sony_pic_add, .remove = sony_pic_remove, .suspend = sony_pic_suspend, .resume = sony_pic_resume, }, }; static struct dmi_system_id __initdata sonypi_dmi_table[] = { { .ident = "Sony Vaio", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"), }, }, { .ident = "Sony Vaio", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), }, }, { } }; static int __init sony_laptop_init(void) { int result; if (!no_spic && dmi_check_system(sonypi_dmi_table)) { result = acpi_bus_register_driver(&sony_pic_driver); if (result) { printk(KERN_ERR DRV_PFX "Unable to register SPIC driver."); goto out; } spic_drv_registered = 1; } result = acpi_bus_register_driver(&sony_nc_driver); 
if (result) { printk(KERN_ERR DRV_PFX "Unable to register SNC driver."); goto out_unregister_pic; } return 0; out_unregister_pic: if (spic_drv_registered) acpi_bus_unregister_driver(&sony_pic_driver); out: return result; } static void __exit sony_laptop_exit(void) { acpi_bus_unregister_driver(&sony_nc_driver); if (spic_drv_registered) acpi_bus_unregister_driver(&sony_pic_driver); } module_init(sony_laptop_init); module_exit(sony_laptop_exit);
gpl-2.0
UberPinguin/android_kernel_samsung_t769
drivers/gpu/drm/nouveau/nouveau_fbcon.c
756
11728
/* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
* * Authors: * David Airlie */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/sysrq.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/screen_info.h> #include <linux/vga_switcheroo.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" #include "drm_fb_helper.h" #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nouveau_crtc.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" #include "nouveau_dma.h" static int nouveau_fbcon_sync(struct fb_info *info) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; int ret, i; if (!chan || !chan->accel_done || info->state != FBINFO_STATE_RUNNING || info->flags & FBINFO_HWACCEL_DISABLED) return 0; if (RING_SPACE(chan, 4)) { nouveau_fbcon_gpu_lockup(info); return 0; } BEGIN_RING(chan, 0, 0x0104, 1); OUT_RING(chan, 0); BEGIN_RING(chan, 0, 0x0100, 1); OUT_RING(chan, 0); nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); FIRE_RING(chan); ret = -EBUSY; for (i = 0; i < 100000; i++) { if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) { ret = 0; break; } DRM_UDELAY(1); } if (ret) { nouveau_fbcon_gpu_lockup(info); return 0; } chan->accel_done = false; return 0; } static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_sync = nouveau_fbcon_sync, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, }; static struct fb_ops nv04_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, 
.fb_fillrect = nv04_fbcon_fillrect, .fb_copyarea = nv04_fbcon_copyarea, .fb_imageblit = nv04_fbcon_imageblit, .fb_sync = nouveau_fbcon_sync, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, }; static struct fb_ops nv50_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = nv50_fbcon_fillrect, .fb_copyarea = nv50_fbcon_copyarea, .fb_imageblit = nv50_fbcon_imageblit, .fb_sync = nouveau_fbcon_sync, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, }; static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); nv_crtc->lut.r[regno] = red; nv_crtc->lut.g[regno] = green; nv_crtc->lut.b[regno] = blue; } static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); *red = nv_crtc->lut.r[regno]; *green = nv_crtc->lut.g[regno]; *blue = nv_crtc->lut.b[regno]; } static void nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) { struct fb_info *info = nfbdev->helper.fbdev; struct fb_fillrect rect; /* Clear the entire fbcon. The drm will program every connector * with it's preferred mode. If the sizes differ, one display will * quite likely have garbage around the console. 
*/ rect.dx = rect.dy = 0; rect.width = info->var.xres_virtual; rect.height = info->var.yres_virtual; rect.color = 0; rect.rop = ROP_COPY; info->fbops->fb_fillrect(info, &rect); } static int nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct nouveau_framebuffer *nouveau_fb; struct nouveau_bo *nvbo; struct drm_mode_fb_cmd mode_cmd; struct pci_dev *pdev = dev->pdev; struct device *device = &pdev->dev; int size, ret; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.bpp = sizes->surface_bpp; mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3); mode_cmd.pitch = roundup(mode_cmd.pitch, 256); mode_cmd.depth = sizes->surface_depth; size = mode_cmd.pitch * mode_cmd.height; size = roundup(size, PAGE_SIZE); ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, false, true, &nvbo); if (ret) { NV_ERROR(dev, "failed to allocate framebuffer\n"); goto out; } ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "failed to pin fb: %d\n", ret); nouveau_bo_ref(NULL, &nvbo); goto out; } ret = nouveau_bo_map(nvbo); if (ret) { NV_ERROR(dev, "failed to map fb: %d\n", ret); nouveau_bo_unpin(nvbo); nouveau_bo_ref(NULL, &nvbo); goto out; } mutex_lock(&dev->struct_mutex); info = framebuffer_alloc(0, device); if (!info) { ret = -ENOMEM; goto out_unref; } ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { ret = -ENOMEM; goto out_unref; } info->par = nfbdev; nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo); nouveau_fb = &nfbdev->nouveau_fb; fb = &nouveau_fb->base; /* setup helper */ nfbdev->helper.fb = fb; nfbdev->helper.fbdev = info; strcpy(info->fix.id, "nouveaufb"); if (nouveau_nofbaccel) info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED; else info->flags = FBINFO_DEFAULT | 
FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; info->fbops = &nouveau_fbcon_ops; info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - dev_priv->vm_vram_base; info->fix.smem_len = size; info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); info->screen_size = size; drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); /* FIXME: we really shouldn't expose mmio space at all */ info->fix.mmio_start = pci_resource_start(pdev, 1); info->fix.mmio_len = pci_resource_len(pdev, 1); /* Set aperture base/size for vesafb takeover */ info->apertures = dev_priv->apertures; if (!info->apertures) { ret = -ENOMEM; goto out_unref; } info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; if (dev_priv->channel && !nouveau_nofbaccel) { switch (dev_priv->card_type) { case NV_50: nv50_fbcon_accel_init(info); info->fbops = &nv50_fbcon_ops; break; default: nv04_fbcon_accel_init(info); info->fbops = &nv04_fbcon_ops; break; }; } nouveau_fbcon_zfill(dev, nfbdev); /* To allow resizeing without swapping buffers */ NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", nouveau_fb->base.width, nouveau_fb->base.height, nvbo->bo.offset, nvbo); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); return 0; out_unref: mutex_unlock(&dev->struct_mutex); out: return ret; } static int nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper; int new_fb = 0; int ret; if (!helper->fb) { ret = nouveau_fbcon_create(nfbdev, sizes); if (ret) return ret; new_fb = 1; } return new_fb; } void nouveau_fbcon_output_poll_changed(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; 
drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); } int nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) { struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; struct fb_info *info; if (nfbdev->helper.fbdev) { info = nfbdev->helper.fbdev; unregister_framebuffer(info); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } if (nouveau_fb->nvbo) { nouveau_bo_unmap(nouveau_fb->nvbo); drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); nouveau_fb->nvbo = NULL; } drm_fb_helper_fini(&nfbdev->helper); drm_framebuffer_cleanup(&nouveau_fb->base); return 0; } void nouveau_fbcon_gpu_lockup(struct fb_info *info) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); info->flags |= FBINFO_HWACCEL_DISABLED; } static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .gamma_set = nouveau_fbcon_gamma_set, .gamma_get = nouveau_fbcon_gamma_get, .fb_probe = nouveau_fbcon_find_or_create_single, }; int nouveau_fbcon_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fbdev *nfbdev; int ret; nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); if (!nfbdev) return -ENOMEM; nfbdev->dev = dev; dev_priv->nfbdev = nfbdev; nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; ret = drm_fb_helper_init(dev, &nfbdev->helper, nv_two_heads(dev) ? 
2 : 1, 4); if (ret) { kfree(nfbdev); return ret; } drm_fb_helper_single_add_all_connectors(&nfbdev->helper); drm_fb_helper_initial_config(&nfbdev->helper, 32); return 0; } void nouveau_fbcon_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; if (!dev_priv->nfbdev) return; nouveau_fbcon_destroy(dev, dev_priv->nfbdev); kfree(dev_priv->nfbdev); dev_priv->nfbdev = NULL; } void nouveau_fbcon_save_disable_accel(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags; dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; } void nouveau_fbcon_restore_accel(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags; } void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct drm_nouveau_private *dev_priv = dev->dev_private; fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); } void nouveau_fbcon_zfill_all(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_fbcon_zfill(dev, dev_priv->nfbdev); }
gpl-2.0