repo_name
string
path
string
copies
string
size
string
content
string
license
string
KaSt/Kappa34
drivers/acpi/acpica/utcopy.c
4919
28561
/****************************************************************************** * * Module Name: utcopy - Internal to external object translation utilities * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utcopy") /* Local prototypes */ static acpi_status acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object, union acpi_object *external_object, u8 * data_space, acpi_size * buffer_space_used); static acpi_status acpi_ut_copy_ielement_to_ielement(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context); static acpi_status acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object, u8 * buffer, acpi_size * space_used); static acpi_status acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj, union acpi_operand_object **return_obj); static acpi_status acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, union acpi_operand_object **internal_object); static acpi_status acpi_ut_copy_simple_object(union acpi_operand_object *source_desc, union acpi_operand_object *dest_desc); static acpi_status acpi_ut_copy_ielement_to_eelement(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context); static acpi_status acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, union acpi_operand_object *dest_obj, struct acpi_walk_state *walk_state); /******************************************************************************* * * FUNCTION: acpi_ut_copy_isimple_to_esimple * * PARAMETERS: internal_object - 
Source object to be copied * external_object - Where to return the copied object * data_space - Where object data is returned (such as * buffer and string data) * buffer_space_used - Length of data_space that was used * * RETURN: Status * * DESCRIPTION: This function is called to copy a simple internal object to * an external object. * * The data_space buffer is assumed to have sufficient space for * the object. * ******************************************************************************/ static acpi_status acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object, union acpi_object *external_object, u8 * data_space, acpi_size * buffer_space_used) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple); *buffer_space_used = 0; /* * Check for NULL object case (could be an uninitialized * package element) */ if (!internal_object) { return_ACPI_STATUS(AE_OK); } /* Always clear the external object */ ACPI_MEMSET(external_object, 0, sizeof(union acpi_object)); /* * In general, the external object will be the same type as * the internal object */ external_object->type = internal_object->common.type; /* However, only a limited number of external types are supported */ switch (internal_object->common.type) { case ACPI_TYPE_STRING: external_object->string.pointer = (char *)data_space; external_object->string.length = internal_object->string.length; *buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size) internal_object-> string. length + 1); ACPI_MEMCPY((void *)data_space, (void *)internal_object->string.pointer, (acpi_size) internal_object->string.length + 1); break; case ACPI_TYPE_BUFFER: external_object->buffer.pointer = data_space; external_object->buffer.length = internal_object->buffer.length; *buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string. 
length); ACPI_MEMCPY((void *)data_space, (void *)internal_object->buffer.pointer, internal_object->buffer.length); break; case ACPI_TYPE_INTEGER: external_object->integer.value = internal_object->integer.value; break; case ACPI_TYPE_LOCAL_REFERENCE: /* This is an object reference. */ switch (internal_object->reference.class) { case ACPI_REFCLASS_NAME: /* * For namepath, return the object handle ("reference") * We are referring to the namespace node */ external_object->reference.handle = internal_object->reference.node; external_object->reference.actual_type = acpi_ns_get_type(internal_object->reference.node); break; default: /* All other reference types are unsupported */ return_ACPI_STATUS(AE_TYPE); } break; case ACPI_TYPE_PROCESSOR: external_object->processor.proc_id = internal_object->processor.proc_id; external_object->processor.pblk_address = internal_object->processor.address; external_object->processor.pblk_length = internal_object->processor.length; break; case ACPI_TYPE_POWER: external_object->power_resource.system_level = internal_object->power_resource.system_level; external_object->power_resource.resource_order = internal_object->power_resource.resource_order; break; default: /* * There is no corresponding external object type */ ACPI_ERROR((AE_INFO, "Unsupported object type, cannot convert to external object: %s", acpi_ut_get_type_name(internal_object->common. 
type))); return_ACPI_STATUS(AE_SUPPORT); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_ielement_to_eelement * * PARAMETERS: acpi_pkg_callback * * RETURN: Status * * DESCRIPTION: Copy one package element to another package element * ******************************************************************************/ static acpi_status acpi_ut_copy_ielement_to_eelement(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context) { acpi_status status = AE_OK; struct acpi_pkg_info *info = (struct acpi_pkg_info *)context; acpi_size object_space; u32 this_index; union acpi_object *target_object; ACPI_FUNCTION_ENTRY(); this_index = state->pkg.index; target_object = (union acpi_object *) &((union acpi_object *)(state->pkg.dest_object))->package. elements[this_index]; switch (object_type) { case ACPI_COPY_TYPE_SIMPLE: /* * This is a simple or null object */ status = acpi_ut_copy_isimple_to_esimple(source_object, target_object, info->free_space, &object_space); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_COPY_TYPE_PACKAGE: /* * Build the package object */ target_object->type = ACPI_TYPE_PACKAGE; target_object->package.count = source_object->package.count; target_object->package.elements = ACPI_CAST_PTR(union acpi_object, info->free_space); /* * Pass the new package object back to the package walk routine */ state->pkg.this_target_obj = target_object; /* * Save space for the array of objects (Package elements) * update the buffer length counter */ object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size) target_object-> package.count * sizeof(union acpi_object)); break; default: return (AE_BAD_PARAMETER); } info->free_space += object_space; info->length += object_space; return (status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_ipackage_to_epackage * * 
PARAMETERS: internal_object - Pointer to the object we are returning * Buffer - Where the object is returned * space_used - Where the object length is returned * * RETURN: Status * * DESCRIPTION: This function is called to place a package object in a user * buffer. A package object by definition contains other objects. * * The buffer is assumed to have sufficient space for the object. * The caller must have verified the buffer length needed using * the acpi_ut_get_object_size function before calling this function. * ******************************************************************************/ static acpi_status acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object, u8 * buffer, acpi_size * space_used) { union acpi_object *external_object; acpi_status status; struct acpi_pkg_info info; ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage); /* * First package at head of the buffer */ external_object = ACPI_CAST_PTR(union acpi_object, buffer); /* * Free space begins right after the first package */ info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); info.free_space = buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); info.object_space = 0; info.num_packages = 1; external_object->type = internal_object->common.type; external_object->package.count = internal_object->package.count; external_object->package.elements = ACPI_CAST_PTR(union acpi_object, info.free_space); /* * Leave room for an array of ACPI_OBJECTS in the buffer * and move the free space past it */ info.length += (acpi_size) external_object->package.count * ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); info.free_space += external_object->package.count * ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); status = acpi_ut_walk_package_tree(internal_object, external_object, acpi_ut_copy_ielement_to_eelement, &info); *space_used = info.length; return_ACPI_STATUS(status); } 
/******************************************************************************* * * FUNCTION: acpi_ut_copy_iobject_to_eobject * * PARAMETERS: internal_object - The internal object to be converted * ret_buffer - Where the object is returned * * RETURN: Status * * DESCRIPTION: This function is called to build an API object to be returned * to the caller. * ******************************************************************************/ acpi_status acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object, struct acpi_buffer *ret_buffer) { acpi_status status; ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject); if (internal_object->common.type == ACPI_TYPE_PACKAGE) { /* * Package object: Copy all subobjects (including * nested packages) */ status = acpi_ut_copy_ipackage_to_epackage(internal_object, ret_buffer->pointer, &ret_buffer->length); } else { /* * Build a simple object (no nested objects) */ status = acpi_ut_copy_isimple_to_esimple(internal_object, ACPI_CAST_PTR(union acpi_object, ret_buffer-> pointer), ACPI_ADD_PTR(u8, ret_buffer-> pointer, ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object))), &ret_buffer->length); /* * build simple does not include the object size in the length * so we add it in here */ ret_buffer->length += sizeof(union acpi_object); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_esimple_to_isimple * * PARAMETERS: external_object - The external object to be converted * ret_internal_object - Where the internal object is returned * * RETURN: Status * * DESCRIPTION: This function copies an external object to an internal one. * NOTE: Pointers can be copied, we don't need to copy data. * (The pointers have to be valid in our address space no matter * what we do with them!) 
* ******************************************************************************/ static acpi_status acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object, union acpi_operand_object **ret_internal_object) { union acpi_operand_object *internal_object; ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple); /* * Simple types supported are: String, Buffer, Integer */ switch (external_object->type) { case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: case ACPI_TYPE_INTEGER: case ACPI_TYPE_LOCAL_REFERENCE: internal_object = acpi_ut_create_internal_object((u8) external_object-> type); if (!internal_object) { return_ACPI_STATUS(AE_NO_MEMORY); } break; case ACPI_TYPE_ANY: /* This is the case for a NULL object */ *ret_internal_object = NULL; return_ACPI_STATUS(AE_OK); default: /* All other types are not supported */ ACPI_ERROR((AE_INFO, "Unsupported object type, cannot convert to internal object: %s", acpi_ut_get_type_name(external_object->type))); return_ACPI_STATUS(AE_SUPPORT); } /* Must COPY string and buffer contents */ switch (external_object->type) { case ACPI_TYPE_STRING: internal_object->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size) external_object->string.length + 1); if (!internal_object->string.pointer) { goto error_exit; } ACPI_MEMCPY(internal_object->string.pointer, external_object->string.pointer, external_object->string.length); internal_object->string.length = external_object->string.length; break; case ACPI_TYPE_BUFFER: internal_object->buffer.pointer = ACPI_ALLOCATE_ZEROED(external_object->buffer.length); if (!internal_object->buffer.pointer) { goto error_exit; } ACPI_MEMCPY(internal_object->buffer.pointer, external_object->buffer.pointer, external_object->buffer.length); internal_object->buffer.length = external_object->buffer.length; /* Mark buffer data valid */ internal_object->buffer.flags |= AOPOBJ_DATA_VALID; break; case ACPI_TYPE_INTEGER: internal_object->integer.value = external_object->integer.value; break; case ACPI_TYPE_LOCAL_REFERENCE: /* 
TBD: should validate incoming handle */ internal_object->reference.class = ACPI_REFCLASS_NAME; internal_object->reference.node = external_object->reference.handle; break; default: /* Other types can't get here */ break; } *ret_internal_object = internal_object; return_ACPI_STATUS(AE_OK); error_exit: acpi_ut_remove_reference(internal_object); return_ACPI_STATUS(AE_NO_MEMORY); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_epackage_to_ipackage * * PARAMETERS: external_object - The external object to be converted * internal_object - Where the internal object is returned * * RETURN: Status * * DESCRIPTION: Copy an external package object to an internal package. * Handles nested packages. * ******************************************************************************/ static acpi_status acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, union acpi_operand_object **internal_object) { acpi_status status = AE_OK; union acpi_operand_object *package_object; union acpi_operand_object **package_elements; u32 i; ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage); /* Create the package object */ package_object = acpi_ut_create_package_object(external_object->package.count); if (!package_object) { return_ACPI_STATUS(AE_NO_MEMORY); } package_elements = package_object->package.elements; /* * Recursive implementation. Probably ok, since nested external packages * as parameters should be very rare. */ for (i = 0; i < external_object->package.count; i++) { status = acpi_ut_copy_eobject_to_iobject(&external_object->package. 
elements[i], &package_elements[i]); if (ACPI_FAILURE(status)) { /* Truncate package and delete it */ package_object->package.count = i; package_elements[i] = NULL; acpi_ut_remove_reference(package_object); return_ACPI_STATUS(status); } } /* Mark package data valid */ package_object->package.flags |= AOPOBJ_DATA_VALID; *internal_object = package_object; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_eobject_to_iobject * * PARAMETERS: external_object - The external object to be converted * internal_object - Where the internal object is returned * * RETURN: Status * * DESCRIPTION: Converts an external object to an internal object. * ******************************************************************************/ acpi_status acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object, union acpi_operand_object **internal_object) { acpi_status status; ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject); if (external_object->type == ACPI_TYPE_PACKAGE) { status = acpi_ut_copy_epackage_to_ipackage(external_object, internal_object); } else { /* * Build a simple object (no nested objects) */ status = acpi_ut_copy_esimple_to_isimple(external_object, internal_object); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_simple_object * * PARAMETERS: source_desc - The internal object to be copied * dest_desc - New target object * * RETURN: Status * * DESCRIPTION: Simple copy of one internal object to another. Reference count * of the destination object is preserved. 
* ******************************************************************************/ static acpi_status acpi_ut_copy_simple_object(union acpi_operand_object *source_desc, union acpi_operand_object *dest_desc) { u16 reference_count; union acpi_operand_object *next_object; acpi_status status; acpi_size copy_size; /* Save fields from destination that we don't want to overwrite */ reference_count = dest_desc->common.reference_count; next_object = dest_desc->common.next_object; /* * Copy the entire source object over the destination object. * Note: Source can be either an operand object or namespace node. */ copy_size = sizeof(union acpi_operand_object); if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) { copy_size = sizeof(struct acpi_namespace_node); } ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc), ACPI_CAST_PTR(char, source_desc), copy_size); /* Restore the saved fields */ dest_desc->common.reference_count = reference_count; dest_desc->common.next_object = next_object; /* New object is not static, regardless of source */ dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER; /* Handle the objects with extra data */ switch (dest_desc->common.type) { case ACPI_TYPE_BUFFER: /* * Allocate and copy the actual buffer if and only if: * 1) There is a valid buffer pointer * 2) The buffer has a length > 0 */ if ((source_desc->buffer.pointer) && (source_desc->buffer.length)) { dest_desc->buffer.pointer = ACPI_ALLOCATE(source_desc->buffer.length); if (!dest_desc->buffer.pointer) { return (AE_NO_MEMORY); } /* Copy the actual buffer data */ ACPI_MEMCPY(dest_desc->buffer.pointer, source_desc->buffer.pointer, source_desc->buffer.length); } break; case ACPI_TYPE_STRING: /* * Allocate and copy the actual string if and only if: * 1) There is a valid string pointer * (Pointer to a NULL string is allowed) */ if (source_desc->string.pointer) { dest_desc->string.pointer = ACPI_ALLOCATE((acpi_size) source_desc->string. 
length + 1); if (!dest_desc->string.pointer) { return (AE_NO_MEMORY); } /* Copy the actual string data */ ACPI_MEMCPY(dest_desc->string.pointer, source_desc->string.pointer, (acpi_size) source_desc->string.length + 1); } break; case ACPI_TYPE_LOCAL_REFERENCE: /* * We copied the reference object, so we now must add a reference * to the object pointed to by the reference * * DDBHandle reference (from Load/load_table) is a special reference, * it does not have a Reference.Object, so does not need to * increase the reference count */ if (source_desc->reference.class == ACPI_REFCLASS_TABLE) { break; } acpi_ut_add_reference(source_desc->reference.object); break; case ACPI_TYPE_REGION: /* * We copied the Region Handler, so we now must add a reference */ if (dest_desc->region.handler) { acpi_ut_add_reference(dest_desc->region.handler); } break; /* * For Mutex and Event objects, we cannot simply copy the underlying * OS object. We must create a new one. */ case ACPI_TYPE_MUTEX: status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex); if (ACPI_FAILURE(status)) { return status; } break; case ACPI_TYPE_EVENT: status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &dest_desc->event. 
os_semaphore); if (ACPI_FAILURE(status)) { return status; } break; default: /* Nothing to do for other simple objects */ break; } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_ielement_to_ielement * * PARAMETERS: acpi_pkg_callback * * RETURN: Status * * DESCRIPTION: Copy one package element to another package element * ******************************************************************************/ static acpi_status acpi_ut_copy_ielement_to_ielement(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context) { acpi_status status = AE_OK; u32 this_index; union acpi_operand_object **this_target_ptr; union acpi_operand_object *target_object; ACPI_FUNCTION_ENTRY(); this_index = state->pkg.index; this_target_ptr = (union acpi_operand_object **) &state->pkg.dest_object->package.elements[this_index]; switch (object_type) { case ACPI_COPY_TYPE_SIMPLE: /* A null source object indicates a (legal) null package element */ if (source_object) { /* * This is a simple object, just copy it */ target_object = acpi_ut_create_internal_object(source_object-> common.type); if (!target_object) { return (AE_NO_MEMORY); } status = acpi_ut_copy_simple_object(source_object, target_object); if (ACPI_FAILURE(status)) { goto error_exit; } *this_target_ptr = target_object; } else { /* Pass through a null element */ *this_target_ptr = NULL; } break; case ACPI_COPY_TYPE_PACKAGE: /* * This object is a package - go down another nesting level * Create and build the package object */ target_object = acpi_ut_create_package_object(source_object->package.count); if (!target_object) { return (AE_NO_MEMORY); } target_object->common.flags = source_object->common.flags; /* Pass the new package object back to the package walk routine */ state->pkg.this_target_obj = target_object; /* Store the object pointer in the parent package object */ *this_target_ptr = target_object; break; 
default: return (AE_BAD_PARAMETER); } return (status); error_exit: acpi_ut_remove_reference(target_object); return (status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_ipackage_to_ipackage * * PARAMETERS: source_obj - Pointer to the source package object * dest_obj - Where the internal object is returned * walk_state - Current Walk state descriptor * * RETURN: Status * * DESCRIPTION: This function is called to copy an internal package object * into another internal package object. * ******************************************************************************/ static acpi_status acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, union acpi_operand_object *dest_obj, struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage); dest_obj->common.type = source_obj->common.type; dest_obj->common.flags = source_obj->common.flags; dest_obj->package.count = source_obj->package.count; /* * Create the object array and walk the source package tree */ dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) source_obj->package. count + 1) * sizeof(void *)); if (!dest_obj->package.elements) { ACPI_ERROR((AE_INFO, "Package allocation failure")); return_ACPI_STATUS(AE_NO_MEMORY); } /* * Copy the package element-by-element by walking the package "tree". * This handles nested packages of arbitrary depth. 
*/ status = acpi_ut_walk_package_tree(source_obj, dest_obj, acpi_ut_copy_ielement_to_ielement, walk_state); if (ACPI_FAILURE(status)) { /* On failure, delete the destination package object */ acpi_ut_remove_reference(dest_obj); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_iobject_to_iobject * * PARAMETERS: source_desc - The internal object to be copied * dest_desc - Where the copied object is returned * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Copy an internal object to a new internal object * ******************************************************************************/ acpi_status acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc, union acpi_operand_object **dest_desc, struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject); /* Create the top level object */ *dest_desc = acpi_ut_create_internal_object(source_desc->common.type); if (!*dest_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Copy the object and possible subobjects */ if (source_desc->common.type == ACPI_TYPE_PACKAGE) { status = acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc, walk_state); } else { status = acpi_ut_copy_simple_object(source_desc, *dest_desc); } return_ACPI_STATUS(status); }
gpl-2.0
DroidTh3ory-xx/kernel_lge_trinity_four
drivers/acpi/acpica/rscreate.c
4919
16051
/******************************************************************************* * * Module Name: rscreate - Create resource lists/tables * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #include "acnamesp.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rscreate") /******************************************************************************* * * FUNCTION: acpi_buffer_to_resource * * PARAMETERS: aml_buffer - Pointer to the resource byte stream * aml_buffer_length - Length of the aml_buffer * resource_ptr - Where the converted resource is returned * * RETURN: Status * * DESCRIPTION: Convert a raw AML buffer to a resource list * ******************************************************************************/ acpi_status acpi_buffer_to_resource(u8 *aml_buffer, u16 aml_buffer_length, struct acpi_resource **resource_ptr) { acpi_status status; acpi_size list_size_needed; void *resource; void *current_resource_ptr; /* * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag * is not required here. 
 */

	/* Get the required length for the converted resource */

	status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
					 &list_size_needed);
	if (status == AE_AML_NO_RESOURCE_END_TAG) {
		status = AE_OK;
	}
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Allocate a buffer for the converted resource */

	resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
	current_resource_ptr = resource;
	if (!resource) {
		return (AE_NO_MEMORY);
	}

	/* Perform the AML-to-Resource conversion */

	status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
					    acpi_rs_convert_aml_to_resources,
					    &current_resource_ptr);
	if (status == AE_AML_NO_RESOURCE_END_TAG) {
		status = AE_OK;
	}

	/* On failure, free the buffer; otherwise hand it to the caller */

	if (ACPI_FAILURE(status)) {
		ACPI_FREE(resource);
	} else {
		*resource_ptr = resource;
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_create_resource_list
 *
 * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
 *              output_buffer       - Pointer to the user's buffer
 *
 * RETURN:      Status: AE_OK if okay, else a valid acpi_status code
 *              If output_buffer is not large enough, output_buffer_length
 *              indicates how large output_buffer should be, else it
 *              indicates how may u8 elements of output_buffer are valid.
 *
 * DESCRIPTION: Takes the byte stream returned from a _CRS, _PRS control
 *              method execution and parses the stream to create a linked
 *              list of device resources.
 *
 ******************************************************************************/

acpi_status
acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
			     struct acpi_buffer * output_buffer)
{

	acpi_status status;
	u8 *aml_start;
	acpi_size list_size_needed = 0;
	u32 aml_buffer_length;
	void *resource;

	ACPI_FUNCTION_TRACE(rs_create_resource_list);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlBuffer = %p\n", aml_buffer));

	/* Params already validated, so we don't re-validate here */

	aml_buffer_length = aml_buffer->buffer.length;
	aml_start = aml_buffer->buffer.pointer;

	/*
	 * Pass the aml_buffer into a module that can calculate
	 * the buffer size needed for the linked list
	 */
	status = acpi_rs_get_list_length(aml_start, aml_buffer_length,
					 &list_size_needed);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X ListSizeNeeded=%X\n",
			  status, (u32) list_size_needed));
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Validate/Allocate/Clear caller buffer */

	status = acpi_ut_initialize_buffer(output_buffer, list_size_needed);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Do the conversion */

	resource = output_buffer->pointer;
	status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
					    acpi_rs_convert_aml_to_resources,
					    &resource);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
			  output_buffer->pointer, (u32) output_buffer->length));
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_create_pci_routing_table
 *
 * PARAMETERS:  package_object      - Pointer to a union acpi_operand_object
 *                                    package
 *              output_buffer       - Pointer to the user's buffer
 *
 * RETURN:      Status AE_OK if okay, else a valid acpi_status code.
 *              If the output_buffer is too small, the error will be
 *              AE_BUFFER_OVERFLOW and output_buffer->Length will point
 *              to the size buffer needed.
 *
 * DESCRIPTION: Takes the union acpi_operand_object package and creates a
 *              linked list of PCI interrupt descriptions
 *
 * NOTE: It is the caller's responsibility to ensure that the start of the
 * output buffer is aligned properly (if necessary).
 *
 ******************************************************************************/

acpi_status
acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
				 struct acpi_buffer *output_buffer)
{
	u8 *buffer;
	union acpi_operand_object **top_object_list;
	union acpi_operand_object **sub_object_list;
	union acpi_operand_object *obj_desc;
	acpi_size buffer_size_needed = 0;
	u32 number_of_elements;
	u32 index;
	struct acpi_pci_routing_table *user_prt;
	struct acpi_namespace_node *node;
	acpi_status status;
	struct acpi_buffer path_buffer;

	ACPI_FUNCTION_TRACE(rs_create_pci_routing_table);

	/* Params already validated, so we don't re-validate here */

	/* Get the required buffer length */

	status = acpi_rs_get_pci_routing_table_length(package_object,
						      &buffer_size_needed);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "BufferSizeNeeded = %X\n",
			  (u32) buffer_size_needed));

	/* Validate/Allocate/Clear caller buffer */

	status = acpi_ut_initialize_buffer(output_buffer, buffer_size_needed);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a
	 * package that in turn contains an u64 Address, a u8 Pin,
	 * a Name, and a u8 source_index.
	 */
	top_object_list = package_object->package.elements;
	number_of_elements = package_object->package.count;
	buffer = output_buffer->pointer;
	user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);

	for (index = 0; index < number_of_elements; index++) {
		/*
		 * Point user_prt past this current structure
		 *
		 * NOTE: On the first iteration, user_prt->Length will
		 * be zero because we cleared the return buffer earlier
		 */
		buffer += user_prt->length;
		user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer);

		/*
		 * Fill in the Length field with the information we have at this
		 * point. The minus four is to subtract the size of the
		 * u8 Source[4] member because it is added below (as a pathname,
		 * a string, or a zeroed u32).
		 */
		user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);

		/* Each element of the top-level package must also be a package */

		if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) {
			ACPI_ERROR((AE_INFO,
				    "(PRT[%u]) Need sub-package, found %s",
				    index,
				    acpi_ut_get_object_type_name
				    (*top_object_list)));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/* Each sub-package must be of length 4 */

		if ((*top_object_list)->package.count != 4) {
			ACPI_ERROR((AE_INFO,
				    "(PRT[%u]) Need package of length 4, found length %u",
				    index, (*top_object_list)->package.count));
			return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
		}

		/*
		 * Dereference the sub-package.
		 * The sub_object_list will now point to an array of the four IRQ
		 * elements: [Address, Pin, Source, source_index]
		 */
		sub_object_list = (*top_object_list)->package.elements;

		/* 1) First subobject: Dereference the PRT.Address */

		obj_desc = sub_object_list[0];
		if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
			ACPI_ERROR((AE_INFO,
				    "(PRT[%u].Address) Need Integer, found %s",
				    index,
				    acpi_ut_get_object_type_name(obj_desc)));
			return_ACPI_STATUS(AE_BAD_DATA);
		}

		user_prt->address = obj_desc->integer.value;

		/* 2) Second subobject: Dereference the PRT.Pin */

		obj_desc = sub_object_list[1];
		if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
			ACPI_ERROR((AE_INFO,
				    "(PRT[%u].Pin) Need Integer, found %s",
				    index,
				    acpi_ut_get_object_type_name(obj_desc)));
			return_ACPI_STATUS(AE_BAD_DATA);
		}

		user_prt->pin = (u32) obj_desc->integer.value;

		/*
		 * If the BIOS has erroneously reversed the _PRT source_name (index 2)
		 * and the source_index (index 3), fix it. _PRT is important enough to
		 * workaround this BIOS error. This also provides compatibility with
		 * other ACPI implementations.
		 *
		 * Heuristic: element 3 is supposed to be an Integer; if it is
		 * missing or non-Integer, assume the two elements were swapped
		 * and swap them back in place before processing element 2.
		 */
		obj_desc = sub_object_list[3];
		if (!obj_desc || (obj_desc->common.type != ACPI_TYPE_INTEGER)) {
			sub_object_list[3] = sub_object_list[2];
			sub_object_list[2] = obj_desc;
			ACPI_WARNING((AE_INFO,
				      "(PRT[%X].Source) SourceName and SourceIndex are reversed, fixed",
				      index));
		}

		/*
		 * 3) Third subobject: Dereference the PRT.source_name
		 * The name may be unresolved (slack mode), so allow a null object
		 */
		obj_desc = sub_object_list[2];
		if (obj_desc) {
			switch (obj_desc->common.type) {
			case ACPI_TYPE_LOCAL_REFERENCE:

				if (obj_desc->reference.class !=
				    ACPI_REFCLASS_NAME) {
					ACPI_ERROR((AE_INFO,
						    "(PRT[%u].Source) Need name, found Reference Class 0x%X",
						    index,
						    obj_desc->reference.class));
					return_ACPI_STATUS(AE_BAD_DATA);
				}

				node = obj_desc->reference.node;

				/* Use *remaining* length of the buffer as max for pathname */

				path_buffer.length = output_buffer->length -
				    (u32) ((u8 *) user_prt->source -
					   (u8 *) output_buffer->pointer);
				path_buffer.pointer = user_prt->source;

				/*
				 * NOTE(review): status is assigned here but not
				 * checked before the strlen below — confirm a
				 * failed pathname conversion cannot leave
				 * user_prt->source unterminated.
				 */
				status =
				    acpi_ns_handle_to_pathname((acpi_handle)
							       node,
							       &path_buffer);

				/* +1 to include null terminator */

				user_prt->length +=
				    (u32) ACPI_STRLEN(user_prt->source) + 1;
				break;

			case ACPI_TYPE_STRING:

				ACPI_STRCPY(user_prt->source,
					    obj_desc->string.pointer);

				/*
				 * Add to the Length field the length of the string
				 * (add 1 for terminator)
				 */
				user_prt->length += obj_desc->string.length + 1;
				break;

			case ACPI_TYPE_INTEGER:
				/*
				 * If this is a number, then the Source Name is NULL, since the
				 * entire buffer was zeroed out, we can leave this alone.
				 *
				 * Add to the Length field the length of the u32 NULL
				 */
				user_prt->length += sizeof(u32);
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "(PRT[%u].Source) Need Ref/String/Integer, found %s",
					    index,
					    acpi_ut_get_object_type_name
					    (obj_desc)));
				return_ACPI_STATUS(AE_BAD_DATA);
			}
		}

		/* Now align the current length (round up to a 64-bit boundary) */

		user_prt->length =
		    (u32) ACPI_ROUND_UP_TO_64BIT(user_prt->length);

		/* 4) Fourth subobject: Dereference the PRT.source_index */

		obj_desc = sub_object_list[3];
		if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
			ACPI_ERROR((AE_INFO,
				    "(PRT[%u].SourceIndex) Need Integer, found %s",
				    index,
				    acpi_ut_get_object_type_name(obj_desc)));
			return_ACPI_STATUS(AE_BAD_DATA);
		}

		user_prt->source_index = (u32) obj_desc->integer.value;

		/* Point to the next union acpi_operand_object in the top level package */

		top_object_list++;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
			  output_buffer->pointer, (u32) output_buffer->length));
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_create_aml_resources
 *
 * PARAMETERS:  linked_list_buffer      - Pointer to the resource linked list
 *              output_buffer           - Pointer to the user's buffer
 *
 * RETURN:      Status  AE_OK if okay, else a valid acpi_status code.
 *              If the output_buffer is too small, the error will be
 *              AE_BUFFER_OVERFLOW and output_buffer->Length will point
 *              to the size buffer needed.
 *
 * DESCRIPTION: Takes the linked list of device resources and
 *              creates a bytestream to be used as input for the
 *              _SRS control method.
* ******************************************************************************/ acpi_status acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer, struct acpi_buffer *output_buffer) { acpi_status status; acpi_size aml_size_needed = 0; ACPI_FUNCTION_TRACE(rs_create_aml_resources); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n", linked_list_buffer)); /* * Params already validated, so we don't re-validate here * * Pass the linked_list_buffer into a module that calculates * the buffer size needed for the byte stream. */ status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n", (u32) aml_size_needed, acpi_format_exception(status))); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(output_buffer, aml_size_needed); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Do the conversion */ status = acpi_rs_convert_resources_to_aml(linked_list_buffer, aml_size_needed, output_buffer->pointer); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n", output_buffer->pointer, (u32) output_buffer->length)); return_ACPI_STATUS(AE_OK); }
gpl-2.0
cameron581/kernel_msm
drivers/acpi/acpica/utstate.c
4919
10325
/******************************************************************************* * * Module Name: utstate - state object support procedures * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
 IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utstate")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_pkg_state_and_push
 *
 * PARAMETERS:  internal_object     - Source object for the new package state
 *              external_object     - Destination object for the new state
 *              index               - Package element index
 *              state_list          - List the new state will be pushed onto
 *
 * RETURN:      Status (AE_NO_MEMORY if the state object cannot be created)
 *
 * DESCRIPTION: Create a new package state and push it onto the state stack
 *
 ******************************************************************************/
acpi_status
acpi_ut_create_pkg_state_and_push(void *internal_object,
				  void *external_object,
				  u16 index,
				  union acpi_generic_state **state_list)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_ENTRY();

	state =
	    acpi_ut_create_pkg_state(internal_object, external_object, index);
	if (!state) {
		return (AE_NO_MEMORY);
	}

	acpi_ut_push_generic_state(state_list, state);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_push_generic_state
 *
 * PARAMETERS:  list_head           - Head of the state stack
 *              state               - State object to push
 *
 * RETURN:      None
 *
 * DESCRIPTION: Push a state object onto a state stack (LIFO, linked via
 *              state->common.next)
 *
 ******************************************************************************/
void
acpi_ut_push_generic_state(union acpi_generic_state **list_head,
			   union acpi_generic_state *state)
{
	ACPI_FUNCTION_TRACE(ut_push_generic_state);

	/* Push the state object onto the front of the list (stack) */

	state->common.next = *list_head;
	*list_head = state;

	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_pop_generic_state
 *
 * PARAMETERS:  list_head           - Head of the state stack
 *
 * RETURN:      The popped state object, or NULL if the stack is empty
 *
 * DESCRIPTION: Pop a state object from a state stack
 *
 ******************************************************************************/
union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
						    **list_head)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_TRACE(ut_pop_generic_state);

	/* Remove the state object at the head of the list (stack) */

	state = *list_head;
	if (state) {

		/* Update the list head */

		*list_head = state->common.next;
	}

	return_PTR(state);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_generic_state
 *
 * PARAMETERS:  None
 *
 * RETURN:      The new state object. NULL on failure.
 *
 * DESCRIPTION: Create a generic state object. Attempt to obtain one from
 *              the global state cache;  If none available, create a new one.
 *
 ******************************************************************************/
union acpi_generic_state *acpi_ut_create_generic_state(void)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_ENTRY();

	state = acpi_os_acquire_object(acpi_gbl_state_cache);
	if (state) {

		/* Initialize: zero the whole union and tag the descriptor */

		memset(state, 0, sizeof(union acpi_generic_state));
		state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
	}

	return (state);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_thread_state
 *
 * PARAMETERS:  None
 *
 * RETURN:      New Thread State. NULL on failure
 *
 * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used
 *              to track per-thread info during method execution
 *
 ******************************************************************************/
struct acpi_thread_state *acpi_ut_create_thread_state(void)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_TRACE(ut_create_thread_state);

	/* Create the generic state object */

	state = acpi_ut_create_generic_state();
	if (!state) {
		return_PTR(NULL);
	}

	/* Init fields specific to the thread struct */

	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD;
	state->thread.thread_id = acpi_os_get_thread_id();

	/* Check for invalid thread ID - zero is very bad, it will break things */

	if (!state->thread.thread_id) {
		ACPI_ERROR((AE_INFO, "Invalid zero ID from AcpiOsGetThreadId"));
		state->thread.thread_id = (acpi_thread_id) 1;
	}

	return_PTR((struct acpi_thread_state *)state);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_update_state
 *
 * PARAMETERS:  object          - Initial Object to be installed in the state
 *              action          - Update action to be performed
 *
 * RETURN:      New state object, null on failure
 *
 * DESCRIPTION: Create an "Update State" - a flavor of the generic state used
 *              to update reference counts and delete complex objects such
 *              as packages.
 *
 ******************************************************************************/
union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
						      *object, u16 action)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);

	/* Create the generic state object */

	state = acpi_ut_create_generic_state();
	if (!state) {
		return_PTR(NULL);
	}

	/* Init fields specific to the update struct */

	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
	state->update.object = object;
	state->update.value = action;
	return_PTR(state);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_pkg_state
 *
 * PARAMETERS:  internal_object     - Source object for the package walk
 *              external_object     - Destination object for the walk
 *              index               - Package element index
 *
 * RETURN:      New state object, null on failure
 *
 * DESCRIPTION: Create a "Package State"
 *
 ******************************************************************************/
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
						   void *external_object,
						   u16 index)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);

	/* Create the generic state object */

	state = acpi_ut_create_generic_state();
	if (!state) {
		return_PTR(NULL);
	}

	/* Init fields specific to the package struct */

	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE;
	state->pkg.source_object = (union acpi_operand_object *)internal_object;
	state->pkg.dest_object = external_object;
	state->pkg.index = index;
	state->pkg.num_packages = 1;
	return_PTR(state);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_control_state
 *
 * PARAMETERS:  None
 *
 * RETURN:      New state object, null on failure
 *
 * DESCRIPTION: Create a "Control State" - a flavor of the generic state used
 *              to support nested IF/WHILE constructs in the AML.
 *
 ******************************************************************************/
union acpi_generic_state *acpi_ut_create_control_state(void)
{
	union acpi_generic_state *state;

	ACPI_FUNCTION_TRACE(ut_create_control_state);

	/* Create the generic state object */

	state = acpi_ut_create_generic_state();
	if (!state) {
		return_PTR(NULL);
	}

	/* Init fields specific to the control struct */

	state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
	state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
	return_PTR(state);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_delete_generic_state
 *
 * PARAMETERS:  state               - The state object to be deleted
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release a state object to the state cache. NULL state objects
 *              are ignored.
 *
 ******************************************************************************/
void acpi_ut_delete_generic_state(union acpi_generic_state *state)
{
	ACPI_FUNCTION_TRACE(ut_delete_generic_state);

	/* Ignore null state */

	if (state) {
		(void)acpi_os_release_object(acpi_gbl_state_cache, state);
	}

	return_VOID;
}
gpl-2.0
TripNRaVeR/tripndroid-m7-unleashed-3.4
drivers/staging/tidspbridge/core/ue_deh.c
7991
6802
/* * ue_deh.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Implements upper edge DSP exception handling (DEH) functions. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * Copyright (C) 2010 Felipe Contreras * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <plat/dmtimer.h> #include <dspbridge/dbdefs.h> #include <dspbridge/dspdeh.h> #include <dspbridge/dev.h> #include "_tiomap.h" #include "_deh.h" #include <dspbridge/io_sm.h> #include <dspbridge/drv.h> #include <dspbridge/wdt.h> static u32 fault_addr; static void mmu_fault_dpc(unsigned long data) { struct deh_mgr *deh = (void *)data; if (!deh) return; bridge_deh_notify(deh, DSP_MMUFAULT, 0); } static irqreturn_t mmu_fault_isr(int irq, void *data) { struct deh_mgr *deh = data; struct cfg_hostres *resources; u32 event; if (!deh) return IRQ_HANDLED; resources = deh->bridge_context->resources; if (!resources) { dev_dbg(bridge, "%s: Failed to get Host Resources\n", __func__); return IRQ_HANDLED; } hw_mmu_event_status(resources->dmmu_base, &event); if (event == HW_MMU_TRANSLATION_FAULT) { hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr); dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, event, fault_addr); /* * Schedule a DPC directly. In the future, it may be * necessary to check if DSP MMU fault is intended for * Bridge. 
*/ tasklet_schedule(&deh->dpc_tasklet); /* Disable the MMU events, else once we clear it will * start to raise INTs again */ hw_mmu_event_disable(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); } else { hw_mmu_event_disable(resources->dmmu_base, HW_MMU_ALL_INTERRUPTS); } return IRQ_HANDLED; } int bridge_deh_create(struct deh_mgr **ret_deh, struct dev_object *hdev_obj) { int status; struct deh_mgr *deh; struct bridge_dev_context *hbridge_context = NULL; /* Message manager will be created when a file is loaded, since * size of message buffer in shared memory is configurable in * the base image. */ /* Get Bridge context info. */ dev_get_bridge_context(hdev_obj, &hbridge_context); /* Allocate IO manager object: */ deh = kzalloc(sizeof(*deh), GFP_KERNEL); if (!deh) { status = -ENOMEM; goto err; } /* Create an NTFY object to manage notifications */ deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); if (!deh->ntfy_obj) { status = -ENOMEM; goto err; } ntfy_init(deh->ntfy_obj); /* Create a MMUfault DPC */ tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); /* Fill in context structure */ deh->bridge_context = hbridge_context; /* Install ISR function for DSP MMU fault */ status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, "DspBridge\tiommu fault", deh); if (status < 0) goto err; *ret_deh = deh; return 0; err: bridge_deh_destroy(deh); *ret_deh = NULL; return status; } int bridge_deh_destroy(struct deh_mgr *deh) { if (!deh) return -EFAULT; /* If notification object exists, delete it */ if (deh->ntfy_obj) { ntfy_delete(deh->ntfy_obj); kfree(deh->ntfy_obj); } /* Disable DSP MMU fault */ free_irq(INT_DSP_MMU_IRQ, deh); /* Free DPC object */ tasklet_kill(&deh->dpc_tasklet); /* Deallocate the DEH manager object */ kfree(deh); return 0; } int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, u32 notify_type, struct dsp_notification *hnotification) { if (!deh) return -EFAULT; if (event_mask) return ntfy_register(deh->ntfy_obj, hnotification, 
event_mask, notify_type); else return ntfy_unregister(deh->ntfy_obj, hnotification); } #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) { struct cfg_hostres *resources; struct hw_mmu_map_attrs_t map_attrs = { .endianism = HW_LITTLE_ENDIAN, .element_size = HW_ELEM_SIZE16BIT, .mixed_size = HW_MMU_CPUES, }; void *dummy_va_addr; resources = dev_context->resources; dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC); /* * Before acking the MMU fault, let's make sure MMU can only * access entry #0. Then add a new entry so that the DSP OS * can continue in order to dump the stack. */ hw_mmu_twl_disable(resources->dmmu_base); hw_mmu_tlb_flush_all(resources->dmmu_base); hw_mmu_tlb_add(resources->dmmu_base, virt_to_phys(dummy_va_addr), fault_addr, HW_PAGE_SIZE4KB, 1, &map_attrs, HW_SET, HW_SET); dsp_clk_enable(DSP_CLK_GPT8); dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); /* Clear MMU interrupt */ hw_mmu_event_ack(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); dump_dsp_stack(dev_context); dsp_clk_disable(DSP_CLK_GPT8); hw_mmu_disable(resources->dmmu_base); free_page((unsigned long)dummy_va_addr); } #endif static inline const char *event_to_string(int event) { switch (event) { case DSP_SYSERROR: return "DSP_SYSERROR"; break; case DSP_MMUFAULT: return "DSP_MMUFAULT"; break; case DSP_PWRERROR: return "DSP_PWRERROR"; break; case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW"; break; default: return "unkown event"; break; } } void bridge_deh_notify(struct deh_mgr *deh, int event, int info) { struct bridge_dev_context *dev_context; const char *str = event_to_string(event); if (!deh) return; dev_dbg(bridge, "%s: device exception", __func__); dev_context = deh->bridge_context; switch (event) { case DSP_SYSERROR: dev_err(bridge, "%s: %s, info=0x%x", __func__, str, info); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE dump_dl_modules(dev_context); dump_dsp_stack(dev_context); #endif break; case DSP_MMUFAULT: dev_err(bridge, "%s: %s, 
addr=0x%x", __func__, str, fault_addr); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE print_dsp_trace_buffer(dev_context); dump_dl_modules(dev_context); mmu_fault_print_stack(dev_context); #endif break; default: dev_err(bridge, "%s: %s", __func__, str); break; } /* Filter subsequent notifications when an error occurs */ if (dev_context->brd_state != BRD_ERROR) { ntfy_notify(deh->ntfy_obj, event); #ifdef CONFIG_TIDSPBRIDGE_RECOVERY bridge_recover_schedule(); #endif } /* Set the Board state as ERROR */ dev_context->brd_state = BRD_ERROR; /* Disable all the clocks that were enabled by DSP */ dsp_clock_disable_all(dev_context->dsp_per_clks); /* * Avoid the subsequent WDT if it happens once, * also if fatal error occurs. */ dsp_wdt_enable(false); }
gpl-2.0
DIGImend/linux
arch/arm/plat-samsung/wakeup-mask.c
9527
1106
/* arch/arm/plat-samsung/wakeup-mask.c * * Copyright 2010 Ben Dooks <ben-linux@fluff.org> * * Support for wakeup mask interrupts on newer SoCs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/types.h> #include <linux/irq.h> #include <linux/io.h> #include <plat/wakeup-mask.h> #include <plat/pm.h> void samsung_sync_wakemask(void __iomem *reg, struct samsung_wakeup_mask *mask, int nr_mask) { struct irq_data *data; u32 val; val = __raw_readl(reg); for (; nr_mask > 0; nr_mask--, mask++) { if (mask->irq == NO_WAKEUP_IRQ) { val |= mask->bit; continue; } data = irq_get_irq_data(mask->irq); /* bit of a liberty to read this directly from irq_data. */ if (irqd_is_wakeup_set(data)) val &= ~mask->bit; else val |= mask->bit; } printk(KERN_INFO "wakemask %08x => %08x\n", __raw_readl(reg), val); __raw_writel(val, reg); }
gpl-2.0
SlimRoms/kernel_lge_g3
sound/oss/sb_audio.c
10039
27326
/* * sound/oss/sb_audio.c * * Audio routines for Sound Blaster compatible cards. * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * Changes * Alan Cox : Formatting and clean ups * * Status * Mostly working. Weird uart bug causing irq storms * * Daniel J. Rodriksson: Changes to make sb16 work full duplex. * Maybe other 16 bit cards in this code could behave * the same. * Chris Rankin: Use spinlocks instead of CLI/STI */ #include <linux/spinlock.h> #include "sound_config.h" #include "sb_mixer.h" #include "sb.h" #include "sb_ess.h" int sb_audio_open(int dev, int mode) { sb_devc *devc = audio_devs[dev]->devc; unsigned long flags; if (devc == NULL) { printk(KERN_ERR "Sound Blaster: incomplete initialization.\n"); return -ENXIO; } if (devc->caps & SB_NO_RECORDING && mode & OPEN_READ) { if (mode == OPEN_READ) return -EPERM; } spin_lock_irqsave(&devc->lock, flags); if (devc->opened) { spin_unlock_irqrestore(&devc->lock, flags); return -EBUSY; } if (devc->dma16 != -1 && devc->dma16 != devc->dma8 && !devc->duplex) { if (sound_open_dma(devc->dma16, "Sound Blaster 16 bit")) { spin_unlock_irqrestore(&devc->lock, flags); return -EBUSY; } } devc->opened = mode; spin_unlock_irqrestore(&devc->lock, flags); devc->irq_mode = IMODE_NONE; devc->irq_mode_16 = IMODE_NONE; devc->fullduplex = devc->duplex && ((mode & OPEN_READ) && (mode & OPEN_WRITE)); sb_dsp_reset(devc); /* At first glance this check isn't enough, some ESS chips might not * have a RECLEV. However if they don't common_mixer_set will refuse * cause devc->iomap has no register mapping for RECLEV */ if (devc->model == MDL_ESS) ess_mixer_reload (devc, SOUND_MIXER_RECLEV); /* The ALS007 seems to require that the DSP be removed from the output */ /* in order for recording to be activated properly. 
This is done by */ /* setting the appropriate bits of the output control register 4ch to */ /* zero. This code assumes that the output control registers are not */ /* used anywhere else and therefore the DSP bits are *always* ON for */ /* output and OFF for sampling. */ if (devc->submodel == SUBMDL_ALS007) { if (mode & OPEN_READ) sb_setmixer(devc,ALS007_OUTPUT_CTRL2, sb_getmixer(devc,ALS007_OUTPUT_CTRL2) & 0xf9); else sb_setmixer(devc,ALS007_OUTPUT_CTRL2, sb_getmixer(devc,ALS007_OUTPUT_CTRL2) | 0x06); } return 0; } void sb_audio_close(int dev) { sb_devc *devc = audio_devs[dev]->devc; /* fix things if mmap turned off fullduplex */ if(devc->duplex && !devc->fullduplex && (devc->opened & OPEN_READ) && (devc->opened & OPEN_WRITE)) { struct dma_buffparms *dmap_temp; dmap_temp = audio_devs[dev]->dmap_out; audio_devs[dev]->dmap_out = audio_devs[dev]->dmap_in; audio_devs[dev]->dmap_in = dmap_temp; } audio_devs[dev]->dmap_out->dma = devc->dma8; audio_devs[dev]->dmap_in->dma = ( devc->duplex ) ? devc->dma16 : devc->dma8; if (devc->dma16 != -1 && devc->dma16 != devc->dma8 && !devc->duplex) sound_close_dma(devc->dma16); /* For ALS007, turn DSP output back on if closing the device for read */ if ((devc->submodel == SUBMDL_ALS007) && (devc->opened & OPEN_READ)) { sb_setmixer(devc,ALS007_OUTPUT_CTRL2, sb_getmixer(devc,ALS007_OUTPUT_CTRL2) | 0x06); } devc->opened = 0; } static void sb_set_output_parms(int dev, unsigned long buf, int nr_bytes, int intrflag) { sb_devc *devc = audio_devs[dev]->devc; if (!devc->fullduplex || devc->bits == AFMT_S16_LE) { devc->trg_buf = buf; devc->trg_bytes = nr_bytes; devc->trg_intrflag = intrflag; devc->irq_mode = IMODE_OUTPUT; } else { devc->trg_buf_16 = buf; devc->trg_bytes_16 = nr_bytes; devc->trg_intrflag_16 = intrflag; devc->irq_mode_16 = IMODE_OUTPUT; } } static void sb_set_input_parms(int dev, unsigned long buf, int count, int intrflag) { sb_devc *devc = audio_devs[dev]->devc; if (!devc->fullduplex || devc->bits != AFMT_S16_LE) { devc->trg_buf 
= buf; devc->trg_bytes = count; devc->trg_intrflag = intrflag; devc->irq_mode = IMODE_INPUT; } else { devc->trg_buf_16 = buf; devc->trg_bytes_16 = count; devc->trg_intrflag_16 = intrflag; devc->irq_mode_16 = IMODE_INPUT; } } /* * SB1.x compatible routines */ static void sb1_audio_output_block(int dev, unsigned long buf, int nr_bytes, int intrflag) { unsigned long flags; int count = nr_bytes; sb_devc *devc = audio_devs[dev]->devc; /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */ if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1; count--; devc->irq_mode = IMODE_OUTPUT; spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x14)) /* 8 bit DAC using DMA */ { sb_dsp_command(devc, (unsigned char) (count & 0xff)); sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff)); } else printk(KERN_WARNING "Sound Blaster: unable to start DAC.\n"); spin_unlock_irqrestore(&devc->lock, flags); devc->intr_active = 1; } static void sb1_audio_start_input(int dev, unsigned long buf, int nr_bytes, int intrflag) { unsigned long flags; int count = nr_bytes; sb_devc *devc = audio_devs[dev]->devc; /* * Start a DMA input to the buffer pointed by dmaqtail */ /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */ if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1; count--; devc->irq_mode = IMODE_INPUT; spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x24)) /* 8 bit ADC using DMA */ { sb_dsp_command(devc, (unsigned char) (count & 0xff)); sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff)); } else printk(KERN_ERR "Sound Blaster: unable to start ADC.\n"); spin_unlock_irqrestore(&devc->lock, flags); devc->intr_active = 1; } static void sb1_audio_trigger(int dev, int bits) { sb_devc *devc = audio_devs[dev]->devc; bits &= devc->irq_mode; if (!bits) sb_dsp_command(devc, 0xd0); /* Halt DMA */ else { switch (devc->irq_mode) { case IMODE_INPUT: sb1_audio_start_input(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; case IMODE_OUTPUT: 
sb1_audio_output_block(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; } } devc->trigger_bits = bits; } static int sb1_audio_prepare_for_input(int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; unsigned long flags; spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x40)) sb_dsp_command(devc, devc->tconst); sb_dsp_command(devc, DSP_CMD_SPKOFF); spin_unlock_irqrestore(&devc->lock, flags); devc->trigger_bits = 0; return 0; } static int sb1_audio_prepare_for_output(int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; unsigned long flags; spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x40)) sb_dsp_command(devc, devc->tconst); sb_dsp_command(devc, DSP_CMD_SPKON); spin_unlock_irqrestore(&devc->lock, flags); devc->trigger_bits = 0; return 0; } static int sb1_audio_set_speed(int dev, int speed) { int max_speed = 23000; sb_devc *devc = audio_devs[dev]->devc; int tmp; if (devc->opened & OPEN_READ) max_speed = 13000; if (speed > 0) { if (speed < 4000) speed = 4000; if (speed > max_speed) speed = max_speed; devc->tconst = (256 - ((1000000 + speed / 2) / speed)) & 0xff; tmp = 256 - devc->tconst; speed = (1000000 + tmp / 2) / tmp; devc->speed = speed; } return devc->speed; } static short sb1_audio_set_channels(int dev, short channels) { sb_devc *devc = audio_devs[dev]->devc; return devc->channels = 1; } static unsigned int sb1_audio_set_bits(int dev, unsigned int bits) { sb_devc *devc = audio_devs[dev]->devc; return devc->bits = 8; } static void sb1_audio_halt_xfer(int dev) { unsigned long flags; sb_devc *devc = audio_devs[dev]->devc; spin_lock_irqsave(&devc->lock, flags); sb_dsp_reset(devc); spin_unlock_irqrestore(&devc->lock, flags); } /* * SB 2.0 and SB 2.01 compatible routines */ static void sb20_audio_output_block(int dev, unsigned long buf, int nr_bytes, int intrflag) { unsigned long flags; int count = nr_bytes; sb_devc *devc = audio_devs[dev]->devc; unsigned char cmd; /* 
DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */ if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1; count--; devc->irq_mode = IMODE_OUTPUT; spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x48)) /* DSP Block size */ { sb_dsp_command(devc, (unsigned char) (count & 0xff)); sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff)); if (devc->speed * devc->channels <= 23000) cmd = 0x1c; /* 8 bit PCM output */ else cmd = 0x90; /* 8 bit high speed PCM output (SB2.01/Pro) */ if (!sb_dsp_command(devc, cmd)) printk(KERN_ERR "Sound Blaster: unable to start DAC.\n"); } else printk(KERN_ERR "Sound Blaster: unable to start DAC.\n"); spin_unlock_irqrestore(&devc->lock, flags); devc->intr_active = 1; } static void sb20_audio_start_input(int dev, unsigned long buf, int nr_bytes, int intrflag) { unsigned long flags; int count = nr_bytes; sb_devc *devc = audio_devs[dev]->devc; unsigned char cmd; /* * Start a DMA input to the buffer pointed by dmaqtail */ /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */ if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1; count--; devc->irq_mode = IMODE_INPUT; spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x48)) /* DSP Block size */ { sb_dsp_command(devc, (unsigned char) (count & 0xff)); sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff)); if (devc->speed * devc->channels <= (devc->major == 3 ? 
23000 : 13000)) cmd = 0x2c; /* 8 bit PCM input */ else cmd = 0x98; /* 8 bit high speed PCM input (SB2.01/Pro) */ if (!sb_dsp_command(devc, cmd)) printk(KERN_ERR "Sound Blaster: unable to start ADC.\n"); } else printk(KERN_ERR "Sound Blaster: unable to start ADC.\n"); spin_unlock_irqrestore(&devc->lock, flags); devc->intr_active = 1; } static void sb20_audio_trigger(int dev, int bits) { sb_devc *devc = audio_devs[dev]->devc; bits &= devc->irq_mode; if (!bits) sb_dsp_command(devc, 0xd0); /* Halt DMA */ else { switch (devc->irq_mode) { case IMODE_INPUT: sb20_audio_start_input(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; case IMODE_OUTPUT: sb20_audio_output_block(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; } } devc->trigger_bits = bits; } /* * SB2.01 specific speed setup */ static int sb201_audio_set_speed(int dev, int speed) { sb_devc *devc = audio_devs[dev]->devc; int tmp; int s = speed * devc->channels; if (speed > 0) { if (speed < 4000) speed = 4000; if (speed > 44100) speed = 44100; if (devc->opened & OPEN_READ && speed > 15000) speed = 15000; devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff; tmp = 256 - devc->tconst; speed = ((1000000 + tmp / 2) / tmp) / devc->channels; devc->speed = speed; } return devc->speed; } /* * SB Pro specific routines */ static int sbpro_audio_prepare_for_input(int dev, int bsize, int bcount) { /* For SB Pro and Jazz16 */ sb_devc *devc = audio_devs[dev]->devc; unsigned long flags; unsigned char bits = 0; if (devc->dma16 >= 0 && devc->dma16 != devc->dma8) audio_devs[dev]->dmap_out->dma = audio_devs[dev]->dmap_in->dma = devc->bits == 16 ? 
devc->dma16 : devc->dma8; if (devc->model == MDL_JAZZ || devc->model == MDL_SMW) if (devc->bits == AFMT_S16_LE) bits = 0x04; /* 16 bit mode */ spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x40)) sb_dsp_command(devc, devc->tconst); sb_dsp_command(devc, DSP_CMD_SPKOFF); if (devc->channels == 1) sb_dsp_command(devc, 0xa0 | bits); /* Mono input */ else sb_dsp_command(devc, 0xa8 | bits); /* Stereo input */ spin_unlock_irqrestore(&devc->lock, flags); devc->trigger_bits = 0; return 0; } static int sbpro_audio_prepare_for_output(int dev, int bsize, int bcount) { /* For SB Pro and Jazz16 */ sb_devc *devc = audio_devs[dev]->devc; unsigned long flags; unsigned char tmp; unsigned char bits = 0; if (devc->dma16 >= 0 && devc->dma16 != devc->dma8) audio_devs[dev]->dmap_out->dma = audio_devs[dev]->dmap_in->dma = devc->bits == 16 ? devc->dma16 : devc->dma8; if (devc->model == MDL_SBPRO) sb_mixer_set_stereo(devc, devc->channels == 2); spin_lock_irqsave(&devc->lock, flags); if (sb_dsp_command(devc, 0x40)) sb_dsp_command(devc, devc->tconst); sb_dsp_command(devc, DSP_CMD_SPKON); if (devc->model == MDL_JAZZ || devc->model == MDL_SMW) { if (devc->bits == AFMT_S16_LE) bits = 0x04; /* 16 bit mode */ if (devc->channels == 1) sb_dsp_command(devc, 0xa0 | bits); /* Mono output */ else sb_dsp_command(devc, 0xa8 | bits); /* Stereo output */ spin_unlock_irqrestore(&devc->lock, flags); } else { spin_unlock_irqrestore(&devc->lock, flags); tmp = sb_getmixer(devc, 0x0e); if (devc->channels == 1) tmp &= ~0x02; else tmp |= 0x02; sb_setmixer(devc, 0x0e, tmp); } devc->trigger_bits = 0; return 0; } static int sbpro_audio_set_speed(int dev, int speed) { sb_devc *devc = audio_devs[dev]->devc; if (speed > 0) { if (speed < 4000) speed = 4000; if (speed > 44100) speed = 44100; if (devc->channels > 1 && speed > 22050) speed = 22050; sb201_audio_set_speed(dev, speed); } return devc->speed; } static short sbpro_audio_set_channels(int dev, short channels) { sb_devc *devc = audio_devs[dev]->devc; 
if (channels == 1 || channels == 2) { if (channels != devc->channels) { devc->channels = channels; if (devc->model == MDL_SBPRO && devc->channels == 2) sbpro_audio_set_speed(dev, devc->speed); } } return devc->channels; } static int jazz16_audio_set_speed(int dev, int speed) { sb_devc *devc = audio_devs[dev]->devc; if (speed > 0) { int tmp; int s = speed * devc->channels; if (speed < 5000) speed = 5000; if (speed > 44100) speed = 44100; devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff; tmp = 256 - devc->tconst; speed = ((1000000 + tmp / 2) / tmp) / devc->channels; devc->speed = speed; } return devc->speed; } /* * SB16 specific routines */ static int sb16_audio_set_speed(int dev, int speed) { sb_devc *devc = audio_devs[dev]->devc; int max_speed = devc->submodel == SUBMDL_ALS100 ? 48000 : 44100; if (speed > 0) { if (speed < 5000) speed = 5000; if (speed > max_speed) speed = max_speed; devc->speed = speed; } return devc->speed; } static unsigned int sb16_audio_set_bits(int dev, unsigned int bits) { sb_devc *devc = audio_devs[dev]->devc; if (bits != 0) { if (bits == AFMT_U8 || bits == AFMT_S16_LE) devc->bits = bits; else devc->bits = AFMT_U8; } return devc->bits; } static int sb16_audio_prepare_for_input(int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; if (!devc->fullduplex) { audio_devs[dev]->dmap_out->dma = audio_devs[dev]->dmap_in->dma = devc->bits == AFMT_S16_LE ? devc->dma16 : devc->dma8; } else if (devc->bits == AFMT_S16_LE) { audio_devs[dev]->dmap_out->dma = devc->dma8; audio_devs[dev]->dmap_in->dma = devc->dma16; } else { audio_devs[dev]->dmap_out->dma = devc->dma16; audio_devs[dev]->dmap_in->dma = devc->dma8; } devc->trigger_bits = 0; return 0; } static int sb16_audio_prepare_for_output(int dev, int bsize, int bcount) { sb_devc *devc = audio_devs[dev]->devc; if (!devc->fullduplex) { audio_devs[dev]->dmap_out->dma = audio_devs[dev]->dmap_in->dma = devc->bits == AFMT_S16_LE ? 
devc->dma16 : devc->dma8; } else if (devc->bits == AFMT_S16_LE) { audio_devs[dev]->dmap_out->dma = devc->dma8; audio_devs[dev]->dmap_in->dma = devc->dma16; } else { audio_devs[dev]->dmap_out->dma = devc->dma16; audio_devs[dev]->dmap_in->dma = devc->dma8; } devc->trigger_bits = 0; return 0; } static void sb16_audio_output_block(int dev, unsigned long buf, int count, int intrflag) { unsigned long flags, cnt; sb_devc *devc = audio_devs[dev]->devc; unsigned long bits; if (!devc->fullduplex || devc->bits == AFMT_S16_LE) { devc->irq_mode = IMODE_OUTPUT; devc->intr_active = 1; } else { devc->irq_mode_16 = IMODE_OUTPUT; devc->intr_active_16 = 1; } /* save value */ spin_lock_irqsave(&devc->lock, flags); bits = devc->bits; if (devc->fullduplex) devc->bits = (devc->bits == AFMT_S16_LE) ? AFMT_U8 : AFMT_S16_LE; spin_unlock_irqrestore(&devc->lock, flags); cnt = count; if (devc->bits == AFMT_S16_LE) cnt >>= 1; cnt--; spin_lock_irqsave(&devc->lock, flags); /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */ sb_dsp_command(devc, 0x41); sb_dsp_command(devc, (unsigned char) ((devc->speed >> 8) & 0xff)); sb_dsp_command(devc, (unsigned char) (devc->speed & 0xff)); sb_dsp_command(devc, (devc->bits == AFMT_S16_LE ? 0xb6 : 0xc6)); sb_dsp_command(devc, ((devc->channels == 2 ? 0x20 : 0) + (devc->bits == AFMT_S16_LE ? 0x10 : 0))); sb_dsp_command(devc, (unsigned char) (cnt & 0xff)); sb_dsp_command(devc, (unsigned char) (cnt >> 8)); /* restore real value after all programming */ devc->bits = bits; spin_unlock_irqrestore(&devc->lock, flags); } /* * This fails on the Cyrix MediaGX. If you don't have the DMA enabled * before the first sample arrives it locks up. However even if you * do enable the DMA in time you just get DMA timeouts and missing * interrupts and stuff, so for now I've not bothered fixing this either. 
*/ static void sb16_audio_start_input(int dev, unsigned long buf, int count, int intrflag) { unsigned long flags, cnt; sb_devc *devc = audio_devs[dev]->devc; if (!devc->fullduplex || devc->bits != AFMT_S16_LE) { devc->irq_mode = IMODE_INPUT; devc->intr_active = 1; } else { devc->irq_mode_16 = IMODE_INPUT; devc->intr_active_16 = 1; } cnt = count; if (devc->bits == AFMT_S16_LE) cnt >>= 1; cnt--; spin_lock_irqsave(&devc->lock, flags); /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */ sb_dsp_command(devc, 0x42); sb_dsp_command(devc, (unsigned char) ((devc->speed >> 8) & 0xff)); sb_dsp_command(devc, (unsigned char) (devc->speed & 0xff)); sb_dsp_command(devc, (devc->bits == AFMT_S16_LE ? 0xbe : 0xce)); sb_dsp_command(devc, ((devc->channels == 2 ? 0x20 : 0) + (devc->bits == AFMT_S16_LE ? 0x10 : 0))); sb_dsp_command(devc, (unsigned char) (cnt & 0xff)); sb_dsp_command(devc, (unsigned char) (cnt >> 8)); spin_unlock_irqrestore(&devc->lock, flags); } static void sb16_audio_trigger(int dev, int bits) { sb_devc *devc = audio_devs[dev]->devc; int bits_16 = bits & devc->irq_mode_16; bits &= devc->irq_mode; if (!bits && !bits_16) sb_dsp_command(devc, 0xd0); /* Halt DMA */ else { if (bits) { switch (devc->irq_mode) { case IMODE_INPUT: sb16_audio_start_input(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; case IMODE_OUTPUT: sb16_audio_output_block(dev, devc->trg_buf, devc->trg_bytes, devc->trg_intrflag); break; } } if (bits_16) { switch (devc->irq_mode_16) { case IMODE_INPUT: sb16_audio_start_input(dev, devc->trg_buf_16, devc->trg_bytes_16, devc->trg_intrflag_16); break; case IMODE_OUTPUT: sb16_audio_output_block(dev, devc->trg_buf_16, devc->trg_bytes_16, devc->trg_intrflag_16); break; } } } devc->trigger_bits = bits | bits_16; } static unsigned char lbuf8[2048]; static signed short *lbuf16 = (signed short *)lbuf8; #define LBUFCOPYSIZE 1024 static void sb16_copy_from_user(int dev, char *localbuf, int localoffs, const char __user *userbuf, int useroffs, int 
max_in, int max_out, int *used, int *returned, int len) { sb_devc *devc = audio_devs[dev]->devc; int i, c, p, locallen; unsigned char *buf8; signed short *buf16; /* if not duplex no conversion */ if (!devc->fullduplex) { if (copy_from_user(localbuf + localoffs, userbuf + useroffs, len)) return; *used = len; *returned = len; } else if (devc->bits == AFMT_S16_LE) { /* 16 -> 8 */ /* max_in >> 1, max number of samples in ( 16 bits ) */ /* max_out, max number of samples out ( 8 bits ) */ /* len, number of samples that will be taken ( 16 bits )*/ /* c, count of samples remaining in buffer ( 16 bits )*/ /* p, count of samples already processed ( 16 bits )*/ len = ( (max_in >> 1) > max_out) ? max_out : (max_in >> 1); c = len; p = 0; buf8 = (unsigned char *)(localbuf + localoffs); while (c) { locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c); /* << 1 in order to get 16 bit samples */ if (copy_from_user(lbuf16, userbuf + useroffs + (p << 1), locallen << 1)) return; for (i = 0; i < locallen; i++) { buf8[p+i] = ~((lbuf16[i] >> 8) & 0xff) ^ 0x80; } c -= locallen; p += locallen; } /* used = ( samples * 16 bits size ) */ *used = max_in > ( max_out << 1) ? (max_out << 1) : max_in; /* returned = ( samples * 8 bits size ) */ *returned = len; } else { /* 8 -> 16 */ /* max_in, max number of samples in ( 8 bits ) */ /* max_out >> 1, max number of samples out ( 16 bits ) */ /* len, number of samples that will be taken ( 8 bits )*/ /* c, count of samples remaining in buffer ( 8 bits )*/ /* p, count of samples already processed ( 8 bits )*/ len = max_in > (max_out >> 1) ? (max_out >> 1) : max_in; c = len; p = 0; buf16 = (signed short *)(localbuf + localoffs); while (c) { locallen = (c >= LBUFCOPYSIZE ? 
LBUFCOPYSIZE : c); if (copy_from_user(lbuf8, userbuf+useroffs + p, locallen)) return; for (i = 0; i < locallen; i++) { buf16[p+i] = (~lbuf8[i] ^ 0x80) << 8; } c -= locallen; p += locallen; } /* used = ( samples * 8 bits size ) */ *used = len; /* returned = ( samples * 16 bits size ) */ *returned = len << 1; } } static void sb16_audio_mmap(int dev) { sb_devc *devc = audio_devs[dev]->devc; devc->fullduplex = 0; } static struct audio_driver sb1_audio_driver = /* SB1.x */ { .owner = THIS_MODULE, .open = sb_audio_open, .close = sb_audio_close, .output_block = sb_set_output_parms, .start_input = sb_set_input_parms, .prepare_for_input = sb1_audio_prepare_for_input, .prepare_for_output = sb1_audio_prepare_for_output, .halt_io = sb1_audio_halt_xfer, .trigger = sb1_audio_trigger, .set_speed = sb1_audio_set_speed, .set_bits = sb1_audio_set_bits, .set_channels = sb1_audio_set_channels }; static struct audio_driver sb20_audio_driver = /* SB2.0 */ { .owner = THIS_MODULE, .open = sb_audio_open, .close = sb_audio_close, .output_block = sb_set_output_parms, .start_input = sb_set_input_parms, .prepare_for_input = sb1_audio_prepare_for_input, .prepare_for_output = sb1_audio_prepare_for_output, .halt_io = sb1_audio_halt_xfer, .trigger = sb20_audio_trigger, .set_speed = sb1_audio_set_speed, .set_bits = sb1_audio_set_bits, .set_channels = sb1_audio_set_channels }; static struct audio_driver sb201_audio_driver = /* SB2.01 */ { .owner = THIS_MODULE, .open = sb_audio_open, .close = sb_audio_close, .output_block = sb_set_output_parms, .start_input = sb_set_input_parms, .prepare_for_input = sb1_audio_prepare_for_input, .prepare_for_output = sb1_audio_prepare_for_output, .halt_io = sb1_audio_halt_xfer, .trigger = sb20_audio_trigger, .set_speed = sb201_audio_set_speed, .set_bits = sb1_audio_set_bits, .set_channels = sb1_audio_set_channels }; static struct audio_driver sbpro_audio_driver = /* SB Pro */ { .owner = THIS_MODULE, .open = sb_audio_open, .close = sb_audio_close, .output_block = 
sb_set_output_parms, .start_input = sb_set_input_parms, .prepare_for_input = sbpro_audio_prepare_for_input, .prepare_for_output = sbpro_audio_prepare_for_output, .halt_io = sb1_audio_halt_xfer, .trigger = sb20_audio_trigger, .set_speed = sbpro_audio_set_speed, .set_bits = sb1_audio_set_bits, .set_channels = sbpro_audio_set_channels }; static struct audio_driver jazz16_audio_driver = /* Jazz16 and SM Wave */ { .owner = THIS_MODULE, .open = sb_audio_open, .close = sb_audio_close, .output_block = sb_set_output_parms, .start_input = sb_set_input_parms, .prepare_for_input = sbpro_audio_prepare_for_input, .prepare_for_output = sbpro_audio_prepare_for_output, .halt_io = sb1_audio_halt_xfer, .trigger = sb20_audio_trigger, .set_speed = jazz16_audio_set_speed, .set_bits = sb16_audio_set_bits, .set_channels = sbpro_audio_set_channels }; static struct audio_driver sb16_audio_driver = /* SB16 */ { .owner = THIS_MODULE, .open = sb_audio_open, .close = sb_audio_close, .output_block = sb_set_output_parms, .start_input = sb_set_input_parms, .prepare_for_input = sb16_audio_prepare_for_input, .prepare_for_output = sb16_audio_prepare_for_output, .halt_io = sb1_audio_halt_xfer, .copy_user = sb16_copy_from_user, .trigger = sb16_audio_trigger, .set_speed = sb16_audio_set_speed, .set_bits = sb16_audio_set_bits, .set_channels = sbpro_audio_set_channels, .mmap = sb16_audio_mmap }; void sb_audio_init(sb_devc * devc, char *name, struct module *owner) { int audio_flags = 0; int format_mask = AFMT_U8; struct audio_driver *driver = &sb1_audio_driver; switch (devc->model) { case MDL_SB1: /* SB1.0 or SB 1.5 */ DDB(printk("Will use standard SB1.x driver\n")); audio_flags = DMA_HARDSTOP; break; case MDL_SB2: DDB(printk("Will use SB2.0 driver\n")); audio_flags = DMA_AUTOMODE; driver = &sb20_audio_driver; break; case MDL_SB201: DDB(printk("Will use SB2.01 (high speed) driver\n")); audio_flags = DMA_AUTOMODE; driver = &sb201_audio_driver; break; case MDL_JAZZ: case MDL_SMW: DDB(printk("Will use Jazz16 
driver\n")); audio_flags = DMA_AUTOMODE; format_mask |= AFMT_S16_LE; driver = &jazz16_audio_driver; break; case MDL_ESS: DDB(printk("Will use ESS ES688/1688 driver\n")); driver = ess_audio_init (devc, &audio_flags, &format_mask); break; case MDL_SB16: DDB(printk("Will use SB16 driver\n")); audio_flags = DMA_AUTOMODE; format_mask |= AFMT_S16_LE; if (devc->dma8 != devc->dma16 && devc->dma16 != -1) { audio_flags |= DMA_DUPLEX; devc->duplex = 1; } driver = &sb16_audio_driver; break; default: DDB(printk("Will use SB Pro driver\n")); audio_flags = DMA_AUTOMODE; driver = &sbpro_audio_driver; } if (owner) driver->owner = owner; if ((devc->dev = sound_install_audiodrv(AUDIO_DRIVER_VERSION, name,driver, sizeof(struct audio_driver), audio_flags, format_mask, devc, devc->dma8, devc->duplex ? devc->dma16 : devc->dma8)) < 0) { printk(KERN_ERR "Sound Blaster: unable to install audio.\n"); return; } audio_devs[devc->dev]->mixer_dev = devc->my_mixerdev; audio_devs[devc->dev]->min_fragment = 5; }
gpl-2.0
arnoldthebat/linux-stable
drivers/i2c/busses/i2c-uniphier-f.c
56
17127
/* * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #define UNIPHIER_FI2C_CR 0x00 /* control register */ #define UNIPHIER_FI2C_CR_MST BIT(3) /* master mode */ #define UNIPHIER_FI2C_CR_STA BIT(2) /* start condition */ #define UNIPHIER_FI2C_CR_STO BIT(1) /* stop condition */ #define UNIPHIER_FI2C_CR_NACK BIT(0) /* do not return ACK */ #define UNIPHIER_FI2C_DTTX 0x04 /* TX FIFO */ #define UNIPHIER_FI2C_DTTX_CMD BIT(8) /* send command (slave addr) */ #define UNIPHIER_FI2C_DTTX_RD BIT(0) /* read transaction */ #define UNIPHIER_FI2C_DTRX 0x04 /* RX FIFO */ #define UNIPHIER_FI2C_SLAD 0x0c /* slave address */ #define UNIPHIER_FI2C_CYC 0x10 /* clock cycle control */ #define UNIPHIER_FI2C_LCTL 0x14 /* clock low period control */ #define UNIPHIER_FI2C_SSUT 0x18 /* restart/stop setup time control */ #define UNIPHIER_FI2C_DSUT 0x1c /* data setup time control */ #define UNIPHIER_FI2C_INT 0x20 /* interrupt status */ #define UNIPHIER_FI2C_IE 0x24 /* interrupt enable */ #define UNIPHIER_FI2C_IC 0x28 /* interrupt clear */ #define UNIPHIER_FI2C_INT_TE BIT(9) /* TX FIFO empty */ #define UNIPHIER_FI2C_INT_RF BIT(8) /* RX FIFO full */ #define UNIPHIER_FI2C_INT_TC BIT(7) /* send complete (STOP) */ #define UNIPHIER_FI2C_INT_RC BIT(6) /* receive complete (STOP) */ #define UNIPHIER_FI2C_INT_TB BIT(5) /* sent specified bytes */ 
#define UNIPHIER_FI2C_INT_RB BIT(4) /* received specified bytes */ #define UNIPHIER_FI2C_INT_NA BIT(2) /* no ACK */ #define UNIPHIER_FI2C_INT_AL BIT(1) /* arbitration lost */ #define UNIPHIER_FI2C_SR 0x2c /* status register */ #define UNIPHIER_FI2C_SR_DB BIT(12) /* device busy */ #define UNIPHIER_FI2C_SR_STS BIT(11) /* stop condition detected */ #define UNIPHIER_FI2C_SR_BB BIT(8) /* bus busy */ #define UNIPHIER_FI2C_SR_RFF BIT(3) /* RX FIFO full */ #define UNIPHIER_FI2C_SR_RNE BIT(2) /* RX FIFO not empty */ #define UNIPHIER_FI2C_SR_TNF BIT(1) /* TX FIFO not full */ #define UNIPHIER_FI2C_SR_TFE BIT(0) /* TX FIFO empty */ #define UNIPHIER_FI2C_RST 0x34 /* reset control */ #define UNIPHIER_FI2C_RST_TBRST BIT(2) /* clear TX FIFO */ #define UNIPHIER_FI2C_RST_RBRST BIT(1) /* clear RX FIFO */ #define UNIPHIER_FI2C_RST_RST BIT(0) /* forcible bus reset */ #define UNIPHIER_FI2C_BM 0x38 /* bus monitor */ #define UNIPHIER_FI2C_BM_SDAO BIT(3) /* output for SDA line */ #define UNIPHIER_FI2C_BM_SDAS BIT(2) /* readback of SDA line */ #define UNIPHIER_FI2C_BM_SCLO BIT(1) /* output for SCL line */ #define UNIPHIER_FI2C_BM_SCLS BIT(0) /* readback of SCL line */ #define UNIPHIER_FI2C_NOISE 0x3c /* noise filter control */ #define UNIPHIER_FI2C_TBC 0x40 /* TX byte count setting */ #define UNIPHIER_FI2C_RBC 0x44 /* RX byte count setting */ #define UNIPHIER_FI2C_TBCM 0x48 /* TX byte count monitor */ #define UNIPHIER_FI2C_RBCM 0x4c /* RX byte count monitor */ #define UNIPHIER_FI2C_BRST 0x50 /* bus reset */ #define UNIPHIER_FI2C_BRST_FOEN BIT(1) /* normal operation */ #define UNIPHIER_FI2C_BRST_RSCL BIT(0) /* release SCL */ #define UNIPHIER_FI2C_INT_FAULTS \ (UNIPHIER_FI2C_INT_NA | UNIPHIER_FI2C_INT_AL) #define UNIPHIER_FI2C_INT_STOP \ (UNIPHIER_FI2C_INT_TC | UNIPHIER_FI2C_INT_RC) #define UNIPHIER_FI2C_RD BIT(0) #define UNIPHIER_FI2C_STOP BIT(1) #define UNIPHIER_FI2C_MANUAL_NACK BIT(2) #define UNIPHIER_FI2C_BYTE_WISE BIT(3) #define UNIPHIER_FI2C_DEFER_STOP_COMP BIT(4) #define 
UNIPHIER_FI2C_DEFAULT_SPEED 100000 #define UNIPHIER_FI2C_MAX_SPEED 400000 #define UNIPHIER_FI2C_FIFO_SIZE 8 struct uniphier_fi2c_priv { struct completion comp; struct i2c_adapter adap; void __iomem *membase; struct clk *clk; unsigned int len; u8 *buf; u32 enabled_irqs; int error; unsigned int flags; unsigned int busy_cnt; }; static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv, bool first) { int fifo_space = UNIPHIER_FI2C_FIFO_SIZE; /* * TX-FIFO stores slave address in it for the first access. * Decrement the counter. */ if (first) fifo_space--; while (priv->len) { if (fifo_space-- <= 0) break; dev_dbg(&priv->adap.dev, "write data: %02x\n", *priv->buf); writel(*priv->buf++, priv->membase + UNIPHIER_FI2C_DTTX); priv->len--; } } static void uniphier_fi2c_drain_rxfifo(struct uniphier_fi2c_priv *priv) { int fifo_left = priv->flags & UNIPHIER_FI2C_BYTE_WISE ? 1 : UNIPHIER_FI2C_FIFO_SIZE; while (priv->len) { if (fifo_left-- <= 0) break; *priv->buf++ = readl(priv->membase + UNIPHIER_FI2C_DTRX); dev_dbg(&priv->adap.dev, "read data: %02x\n", priv->buf[-1]); priv->len--; } } static void uniphier_fi2c_set_irqs(struct uniphier_fi2c_priv *priv) { writel(priv->enabled_irqs, priv->membase + UNIPHIER_FI2C_IE); } static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv) { writel(-1, priv->membase + UNIPHIER_FI2C_IC); } static void uniphier_fi2c_stop(struct uniphier_fi2c_priv *priv) { dev_dbg(&priv->adap.dev, "stop condition\n"); priv->enabled_irqs |= UNIPHIER_FI2C_INT_STOP; uniphier_fi2c_set_irqs(priv); writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STO, priv->membase + UNIPHIER_FI2C_CR); } static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id) { struct uniphier_fi2c_priv *priv = dev_id; u32 irq_status; irq_status = readl(priv->membase + UNIPHIER_FI2C_INT); dev_dbg(&priv->adap.dev, "interrupt: enabled_irqs=%04x, irq_status=%04x\n", priv->enabled_irqs, irq_status); if (irq_status & UNIPHIER_FI2C_INT_STOP) goto complete; if (unlikely(irq_status 
& UNIPHIER_FI2C_INT_AL)) { dev_dbg(&priv->adap.dev, "arbitration lost\n"); priv->error = -EAGAIN; goto complete; } if (unlikely(irq_status & UNIPHIER_FI2C_INT_NA)) { dev_dbg(&priv->adap.dev, "could not get ACK\n"); priv->error = -ENXIO; if (priv->flags & UNIPHIER_FI2C_RD) { /* * work around a hardware bug: * The receive-completed interrupt is never set even if * STOP condition is detected after the address phase * of read transaction fails to get ACK. * To avoid time-out error, we issue STOP here, * but do not wait for its completion. * It should be checked after exiting this handler. */ uniphier_fi2c_stop(priv); priv->flags |= UNIPHIER_FI2C_DEFER_STOP_COMP; goto complete; } goto stop; } if (irq_status & UNIPHIER_FI2C_INT_TE) { if (!priv->len) goto data_done; uniphier_fi2c_fill_txfifo(priv, false); goto handled; } if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) { uniphier_fi2c_drain_rxfifo(priv); if (!priv->len) goto data_done; if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) { if (priv->len <= UNIPHIER_FI2C_FIFO_SIZE && !(priv->flags & UNIPHIER_FI2C_BYTE_WISE)) { dev_dbg(&priv->adap.dev, "enable read byte count IRQ\n"); priv->enabled_irqs |= UNIPHIER_FI2C_INT_RB; uniphier_fi2c_set_irqs(priv); priv->flags |= UNIPHIER_FI2C_BYTE_WISE; } if (priv->len <= 1) { dev_dbg(&priv->adap.dev, "set NACK\n"); writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_NACK, priv->membase + UNIPHIER_FI2C_CR); } } goto handled; } return IRQ_NONE; data_done: if (priv->flags & UNIPHIER_FI2C_STOP) { stop: uniphier_fi2c_stop(priv); } else { complete: priv->enabled_irqs = 0; uniphier_fi2c_set_irqs(priv); complete(&priv->comp); } handled: uniphier_fi2c_clear_irqs(priv); return IRQ_HANDLED; } static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr) { priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE; /* do not use TX byte counter */ writel(0, priv->membase + UNIPHIER_FI2C_TBC); /* set slave address */ writel(UNIPHIER_FI2C_DTTX_CMD | addr << 1, priv->membase + 
UNIPHIER_FI2C_DTTX); /* first chunk of data */ uniphier_fi2c_fill_txfifo(priv, true); } static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr) { priv->flags |= UNIPHIER_FI2C_RD; if (likely(priv->len < 256)) { /* * If possible, use RX byte counter. * It can automatically handle NACK for the last byte. */ writel(priv->len, priv->membase + UNIPHIER_FI2C_RBC); priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB; } else { /* * The byte counter can not count over 256. In this case, * do not use it at all. Drain data when FIFO gets full, * but treat the last portion as a special case. */ writel(0, priv->membase + UNIPHIER_FI2C_RBC); priv->flags |= UNIPHIER_FI2C_MANUAL_NACK; priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF; } /* set slave address with RD bit */ writel(UNIPHIER_FI2C_DTTX_CMD | UNIPHIER_FI2C_DTTX_RD | addr << 1, priv->membase + UNIPHIER_FI2C_DTTX); } static void uniphier_fi2c_reset(struct uniphier_fi2c_priv *priv) { writel(UNIPHIER_FI2C_RST_RST, priv->membase + UNIPHIER_FI2C_RST); } static void uniphier_fi2c_prepare_operation(struct uniphier_fi2c_priv *priv) { writel(UNIPHIER_FI2C_BRST_FOEN | UNIPHIER_FI2C_BRST_RSCL, priv->membase + UNIPHIER_FI2C_BRST); } static void uniphier_fi2c_recover(struct uniphier_fi2c_priv *priv) { uniphier_fi2c_reset(priv); i2c_recover_bus(&priv->adap); } static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg, bool stop) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); bool is_read = msg->flags & I2C_M_RD; unsigned long time_left; dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, stop=%d\n", is_read ? 
"receive" : "transmit", msg->addr, msg->len, stop); priv->len = msg->len; priv->buf = msg->buf; priv->enabled_irqs = UNIPHIER_FI2C_INT_FAULTS; priv->error = 0; priv->flags = 0; if (stop) priv->flags |= UNIPHIER_FI2C_STOP; reinit_completion(&priv->comp); uniphier_fi2c_clear_irqs(priv); writel(UNIPHIER_FI2C_RST_TBRST | UNIPHIER_FI2C_RST_RBRST, priv->membase + UNIPHIER_FI2C_RST); /* reset TX/RX FIFO */ if (is_read) uniphier_fi2c_rx_init(priv, msg->addr); else uniphier_fi2c_tx_init(priv, msg->addr); uniphier_fi2c_set_irqs(priv); dev_dbg(&adap->dev, "start condition\n"); writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA, priv->membase + UNIPHIER_FI2C_CR); time_left = wait_for_completion_timeout(&priv->comp, adap->timeout); if (!time_left) { dev_err(&adap->dev, "transaction timeout.\n"); uniphier_fi2c_recover(priv); return -ETIMEDOUT; } dev_dbg(&adap->dev, "complete\n"); if (unlikely(priv->flags & UNIPHIER_FI2C_DEFER_STOP_COMP)) { u32 status = readl(priv->membase + UNIPHIER_FI2C_SR); if (!(status & UNIPHIER_FI2C_SR_STS) || status & UNIPHIER_FI2C_SR_BB) { dev_err(&adap->dev, "stop condition was not completed.\n"); uniphier_fi2c_recover(priv); return -EBUSY; } } return priv->error; } static int uniphier_fi2c_check_bus_busy(struct i2c_adapter *adap) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); if (readl(priv->membase + UNIPHIER_FI2C_SR) & UNIPHIER_FI2C_SR_DB) { if (priv->busy_cnt++ > 3) { /* * If bus busy continues too long, it is probably * in a wrong state. Try bus recovery. 
*/ uniphier_fi2c_recover(priv); priv->busy_cnt = 0; } return -EAGAIN; } priv->busy_cnt = 0; return 0; } static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *msg, *emsg = msgs + num; int ret; ret = uniphier_fi2c_check_bus_busy(adap); if (ret) return ret; for (msg = msgs; msg < emsg; msg++) { /* If next message is read, skip the stop condition */ bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); /* but, force it if I2C_M_STOP is set */ if (msg->flags & I2C_M_STOP) stop = true; ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); if (ret) return ret; } return num; } static u32 uniphier_fi2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm uniphier_fi2c_algo = { .master_xfer = uniphier_fi2c_master_xfer, .functionality = uniphier_fi2c_functionality, }; static int uniphier_fi2c_get_scl(struct i2c_adapter *adap) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); return !!(readl(priv->membase + UNIPHIER_FI2C_BM) & UNIPHIER_FI2C_BM_SCLS); } static void uniphier_fi2c_set_scl(struct i2c_adapter *adap, int val) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); writel(val ? 
UNIPHIER_FI2C_BRST_RSCL : 0, priv->membase + UNIPHIER_FI2C_BRST); } static int uniphier_fi2c_get_sda(struct i2c_adapter *adap) { struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap); return !!(readl(priv->membase + UNIPHIER_FI2C_BM) & UNIPHIER_FI2C_BM_SDAS); } static void uniphier_fi2c_unprepare_recovery(struct i2c_adapter *adap) { uniphier_fi2c_prepare_operation(i2c_get_adapdata(adap)); } static struct i2c_bus_recovery_info uniphier_fi2c_bus_recovery_info = { .recover_bus = i2c_generic_scl_recovery, .get_scl = uniphier_fi2c_get_scl, .set_scl = uniphier_fi2c_set_scl, .get_sda = uniphier_fi2c_get_sda, .unprepare_recovery = uniphier_fi2c_unprepare_recovery, }; static int uniphier_fi2c_clk_init(struct device *dev, struct uniphier_fi2c_priv *priv) { struct device_node *np = dev->of_node; unsigned long clk_rate; u32 bus_speed, clk_count; int ret; if (of_property_read_u32(np, "clock-frequency", &bus_speed)) bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED; if (!bus_speed) { dev_err(dev, "clock-frequency should not be zero\n"); return -EINVAL; } if (bus_speed > UNIPHIER_FI2C_MAX_SPEED) bus_speed = UNIPHIER_FI2C_MAX_SPEED; /* Get input clk rate through clk driver */ priv->clk = devm_clk_get(dev, NULL); if (IS_ERR(priv->clk)) { dev_err(dev, "failed to get clock\n"); return PTR_ERR(priv->clk); } ret = clk_prepare_enable(priv->clk); if (ret) return ret; clk_rate = clk_get_rate(priv->clk); if (!clk_rate) { dev_err(dev, "input clock rate should not be zero\n"); return -EINVAL; } uniphier_fi2c_reset(priv); clk_count = clk_rate / bus_speed; writel(clk_count, priv->membase + UNIPHIER_FI2C_CYC); writel(clk_count / 2, priv->membase + UNIPHIER_FI2C_LCTL); writel(clk_count / 2, priv->membase + UNIPHIER_FI2C_SSUT); writel(clk_count / 16, priv->membase + UNIPHIER_FI2C_DSUT); uniphier_fi2c_prepare_operation(priv); return 0; } static int uniphier_fi2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct uniphier_fi2c_priv *priv; struct resource *regs; int irq; int ret; 
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->membase = devm_ioremap_resource(dev, regs); if (IS_ERR(priv->membase)) return PTR_ERR(priv->membase); irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "failed to get IRQ number"); return irq; } init_completion(&priv->comp); priv->adap.owner = THIS_MODULE; priv->adap.algo = &uniphier_fi2c_algo; priv->adap.dev.parent = dev; priv->adap.dev.of_node = dev->of_node; strlcpy(priv->adap.name, "UniPhier FI2C", sizeof(priv->adap.name)); priv->adap.bus_recovery_info = &uniphier_fi2c_bus_recovery_info; i2c_set_adapdata(&priv->adap, priv); platform_set_drvdata(pdev, priv); ret = uniphier_fi2c_clk_init(dev, priv); if (ret) goto err; ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0, pdev->name, priv); if (ret) { dev_err(dev, "failed to request irq %d\n", irq); goto err; } ret = i2c_add_adapter(&priv->adap); if (ret) { dev_err(dev, "failed to add I2C adapter\n"); goto err; } err: if (ret) clk_disable_unprepare(priv->clk); return ret; } static int uniphier_fi2c_remove(struct platform_device *pdev) { struct uniphier_fi2c_priv *priv = platform_get_drvdata(pdev); i2c_del_adapter(&priv->adap); clk_disable_unprepare(priv->clk); return 0; } static const struct of_device_id uniphier_fi2c_match[] = { { .compatible = "socionext,uniphier-fi2c" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, uniphier_fi2c_match); static struct platform_driver uniphier_fi2c_drv = { .probe = uniphier_fi2c_probe, .remove = uniphier_fi2c_remove, .driver = { .name = "uniphier-fi2c", .of_match_table = uniphier_fi2c_match, }, }; module_platform_driver(uniphier_fi2c_drv); MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); MODULE_DESCRIPTION("UniPhier FIFO-builtin I2C bus driver"); MODULE_LICENSE("GPL");
gpl-2.0
TheGreatSega/RUSH-KERNEL-RC
drivers/block/cciss.c
56
130919
/* * Disk Array driver for HP Smart Array controllers. * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA. * * Questions/Comments/Bugfixes to iss_storagedev@hp.com * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/delay.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/bio.h> #include <linux/blkpg.h> #include <linux/timer.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/hdreg.h> #include <linux/spinlock.h> #include <linux/compat.h> #include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/dma-mapping.h> #include <linux/blkdev.h> #include <linux/genhd.h> #include <linux/completion.h> #include <scsi/scsi.h> #include <scsi/sg.h> #include <scsi/scsi_ioctl.h> #include <linux/cdrom.h> #include <linux/scatterlist.h> #include <linux/kthread.h> #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) #define DRIVER_NAME "HP CISS Driver (v 3.6.20)" #define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); MODULE_DESCRIPTION("Driver for 
HP Smart Array Controllers"); MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" " SA6i P600 P800 P400 P400i E200 E200i E500 P700m" " Smart Array G2 Series SAS/SATA Controllers"); MODULE_VERSION("3.6.20"); MODULE_LICENSE("GPL"); static int cciss_allow_hpsa; module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(cciss_allow_hpsa, "Prevent cciss driver from accessing hardware known to be " " supported by the hpsa driver"); #include "cciss_cmd.h" #include "cciss.h" #include <linux/cciss_ioctl.h> /* define the PCI info for the cards we can control */ static const struct pci_device_id cciss_pci_device_id[] = { {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, 
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, {0,} }; MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); /* board_id = Subsystem Device ID & Vendor ID * product = Marketing Name for the board * access = Address of the struct of function pointers */ static struct board_type products[] = { {0x40700E11, "Smart Array 5300", &SA5_access}, {0x40800E11, "Smart Array 5i", &SA5B_access}, {0x40820E11, "Smart Array 532", &SA5B_access}, {0x40830E11, "Smart Array 5312", &SA5B_access}, {0x409A0E11, "Smart Array 641", &SA5_access}, {0x409B0E11, "Smart Array 642", &SA5_access}, {0x409C0E11, "Smart Array 6400", &SA5_access}, {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, {0x40910E11, "Smart Array 6i", &SA5_access}, {0x3225103C, "Smart Array P600", &SA5_access}, {0x3235103C, "Smart Array P400i", &SA5_access}, {0x3211103C, "Smart Array E200i", &SA5_access}, {0x3212103C, "Smart Array E200", &SA5_access}, {0x3213103C, "Smart Array E200i", &SA5_access}, {0x3214103C, "Smart Array E200i", &SA5_access}, {0x3215103C, "Smart Array E200i", &SA5_access}, {0x3237103C, "Smart Array E500", &SA5_access}, /* controllers below this line are also supported by the hpsa driver. 
*/ #define HPSA_BOUNDARY 0x3223103C {0x3223103C, "Smart Array P800", &SA5_access}, {0x3234103C, "Smart Array P400", &SA5_access}, {0x323D103C, "Smart Array P700m", &SA5_access}, {0x3241103C, "Smart Array P212", &SA5_access}, {0x3243103C, "Smart Array P410", &SA5_access}, {0x3245103C, "Smart Array P410i", &SA5_access}, {0x3247103C, "Smart Array P411", &SA5_access}, {0x3249103C, "Smart Array P812", &SA5_access}, {0x324A103C, "Smart Array P712m", &SA5_access}, {0x324B103C, "Smart Array P711m", &SA5_access}, }; /* How long to wait (in milliseconds) for board to go into simple mode */ #define MAX_CONFIG_WAIT 30000 #define MAX_IOCTL_CONFIG_WAIT 1000 /*define how many times we will try a command because of bus resets */ #define MAX_CMD_RETRIES 3 #define MAX_CTLR 32 /* Originally cciss driver only supports 8 major numbers */ #define MAX_CTLR_ORIG 8 static ctlr_info_t *hba[MAX_CTLR]; static struct task_struct *cciss_scan_thread; static DEFINE_MUTEX(scan_mutex); static LIST_HEAD(scan_q); static void do_cciss_request(struct request_queue *q); static irqreturn_t do_cciss_intr(int irq, void *dev_id); static int cciss_open(struct block_device *bdev, fmode_t mode); static int cciss_release(struct gendisk *disk, fmode_t mode); static int cciss_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); static int cciss_revalidate(struct gendisk *disk); static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); static int deregister_disk(ctlr_info_t *h, int drv_index, int clear_all, int via_ioctl); static void cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size); static void cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size); static void cciss_geometry_inquiry(int ctlr, int logvol, int withirq, sector_t total_size, unsigned int block_size, InquiryData_struct 
*inq_buff, drive_info_struct *drv); static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32); static void start_io(ctlr_info_t *h); static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, int cmd_type); static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, __u8 page_code, unsigned char scsi3addr[], int cmd_type); static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, int attempt_retry); static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); static void fail_all_cmds(unsigned long ctlr); static int add_to_scan_list(struct ctlr_info *h); static int scan_thread(void *data); static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); static void cciss_hba_release(struct device *dev); static void cciss_device_release(struct device *dev); static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); #ifdef CONFIG_PROC_FS static void cciss_procinit(int i); #else static void cciss_procinit(int i) { } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int cciss_compat_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); #endif static const struct block_device_operations cciss_fops = { .owner = THIS_MODULE, .open = cciss_open, .release = cciss_release, .locked_ioctl = cciss_ioctl, .getgeo = cciss_getgeo, #ifdef CONFIG_COMPAT .compat_ioctl = cciss_compat_ioctl, #endif .revalidate_disk = cciss_revalidate, }; /* * Enqueuing and dequeuing functions for cmdlists. */ static inline void addQ(struct hlist_head *list, CommandList_struct *c) { hlist_add_head(&c->list, list); } static inline void removeQ(CommandList_struct *c) { /* * After kexec/dump some commands might still * be in flight, which the firmware will try * to complete. 
Resetting the firmware doesn't work * with old fw revisions, so we have to mark * them off as 'stale' to prevent the driver from * falling over. */ if (WARN_ON(hlist_unhashed(&c->list))) { c->cmd_type = CMD_MSG_STALE; return; } hlist_del_init(&c->list); } #include "cciss_scsi.c" /* For SCSI tape support */ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "UNKNOWN" }; #define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1) #ifdef CONFIG_PROC_FS /* * Report information about this controller. */ #define ENG_GIG 1000000000 #define ENG_GIG_FACTOR (ENG_GIG/512) #define ENGAGE_SCSI "engage scsi" static struct proc_dir_entry *proc_cciss; static void cciss_seq_show_header(struct seq_file *seq) { ctlr_info_t *h = seq->private; seq_printf(seq, "%s: HP %s Controller\n" "Board ID: 0x%08lx\n" "Firmware Version: %c%c%c%c\n" "IRQ: %d\n" "Logical drives: %d\n" "Current Q depth: %d\n" "Current # commands on controller: %d\n" "Max Q depth since init: %d\n" "Max # commands on controller since init: %d\n" "Max SG entries since init: %d\n", h->devname, h->product_name, (unsigned long)h->board_id, h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], h->num_luns, h->Qdepth, h->commands_outstanding, h->maxQsinceinit, h->max_outstanding, h->maxSG); #ifdef CONFIG_CISS_SCSI_TAPE cciss_seq_tape_report(seq, h->ctlr); #endif /* CONFIG_CISS_SCSI_TAPE */ } static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) { ctlr_info_t *h = seq->private; unsigned ctlr = h->ctlr; unsigned long flags; /* prevent displaying bogus info during configuration * or deconfiguration of a logical volume */ spin_lock_irqsave(CCISS_LOCK(ctlr), flags); if (h->busy_configuring) { spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); return ERR_PTR(-EBUSY); } h->busy_configuring = 1; spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); if (*pos == 0) cciss_seq_show_header(seq); return pos; } static int cciss_seq_show(struct seq_file 
*seq, void *v) { sector_t vol_sz, vol_sz_frac; ctlr_info_t *h = seq->private; unsigned ctlr = h->ctlr; loff_t *pos = v; drive_info_struct *drv = h->drv[*pos]; if (*pos > h->highest_lun) return 0; if (drv == NULL) /* it's possible for h->drv[] to have holes. */ return 0; if (drv->heads == 0) return 0; vol_sz = drv->nr_blocks; vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); vol_sz_frac *= 100; sector_div(vol_sz_frac, ENG_GIG_FACTOR); if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) drv->raid_level = RAID_UNKNOWN; seq_printf(seq, "cciss/c%dd%d:" "\t%4u.%02uGB\tRAID %s\n", ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, raid_label[drv->raid_level]); return 0; } static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ctlr_info_t *h = seq->private; if (*pos > h->highest_lun) return NULL; *pos += 1; return pos; } static void cciss_seq_stop(struct seq_file *seq, void *v) { ctlr_info_t *h = seq->private; /* Only reset h->busy_configuring if we succeeded in setting * it during cciss_seq_start. 
*/ if (v == ERR_PTR(-EBUSY)) return; h->busy_configuring = 0; } static const struct seq_operations cciss_seq_ops = { .start = cciss_seq_start, .show = cciss_seq_show, .next = cciss_seq_next, .stop = cciss_seq_stop, }; static int cciss_seq_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &cciss_seq_ops); struct seq_file *seq = file->private_data; if (!ret) seq->private = PDE(inode)->data; return ret; } static ssize_t cciss_proc_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int err; char *buffer; #ifndef CONFIG_CISS_SCSI_TAPE return -EINVAL; #endif if (!buf || length > PAGE_SIZE - 1) return -EINVAL; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) return -ENOMEM; err = -EFAULT; if (copy_from_user(buffer, buf, length)) goto out; buffer[length] = '\0'; #ifdef CONFIG_CISS_SCSI_TAPE if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { struct seq_file *seq = file->private_data; ctlr_info_t *h = seq->private; int rc; rc = cciss_engage_scsi(h->ctlr); if (rc != 0) err = -rc; else err = length; } else #endif /* CONFIG_CISS_SCSI_TAPE */ err = -EINVAL; /* might be nice to have "disengage" too, but it's not safely possible. (only 1 module use count, lock issues.) 
*/ out: free_page((unsigned long)buffer); return err; } static const struct file_operations cciss_proc_fops = { .owner = THIS_MODULE, .open = cciss_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .write = cciss_proc_write, }; static void __devinit cciss_procinit(int i) { struct proc_dir_entry *pde; if (proc_cciss == NULL) proc_cciss = proc_mkdir("driver/cciss", NULL); if (!proc_cciss) return; pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, proc_cciss, &cciss_proc_fops, hba[i]); } #endif /* CONFIG_PROC_FS */ #define MAX_PRODUCT_NAME_LEN 19 #define to_hba(n) container_of(n, struct ctlr_info, dev) #define to_drv(n) container_of(n, drive_info_struct, dev) static ssize_t host_store_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ctlr_info *h = to_hba(dev); add_to_scan_list(h); wake_up_process(cciss_scan_thread); wait_for_completion_interruptible(&h->scan_wait); return count; } static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); static ssize_t dev_show_unique_id(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); __u8 sn[16]; unsigned long flags; int ret = 0; spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(sn, drv->serial_no, sizeof(sn)); spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); if (ret) return ret; else return snprintf(buf, 16 * 2 + 2, "%02X%02X%02X%02X%02X%02X%02X%02X" "%02X%02X%02X%02X%02X%02X%02X%02X\n", sn[0], sn[1], sn[2], sn[3], sn[4], sn[5], sn[6], sn[7], sn[8], sn[9], sn[10], sn[11], sn[12], sn[13], sn[14], sn[15]); } static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL); static ssize_t dev_show_vendor(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char vendor[VENDOR_LEN + 1]; 
unsigned long flags; int ret = 0; spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(vendor, drv->vendor, VENDOR_LEN + 1); spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); if (ret) return ret; else return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor); } static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL); static ssize_t dev_show_model(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char model[MODEL_LEN + 1]; unsigned long flags; int ret = 0; spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(model, drv->model, MODEL_LEN + 1); spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); if (ret) return ret; else return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model); } static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL); static ssize_t dev_show_rev(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char rev[REV_LEN + 1]; unsigned long flags; int ret = 0; spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(rev, drv->rev, REV_LEN + 1); spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); if (ret) return ret; else return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev); } static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); static ssize_t cciss_show_lunid(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); unsigned long flags; unsigned char lunid[8]; spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); if (h->busy_configuring) { spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return -EBUSY; } if (!drv->heads) { spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return -ENOTTY; } memcpy(lunid, drv->LunID, sizeof(lunid)); 
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", lunid[0], lunid[1], lunid[2], lunid[3], lunid[4], lunid[5], lunid[6], lunid[7]); } static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); static ssize_t cciss_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); int raid; unsigned long flags; spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); if (h->busy_configuring) { spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return -EBUSY; } raid = drv->raid_level; spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); if (raid < 0 || raid > RAID_UNKNOWN) raid = RAID_UNKNOWN; return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", raid_label[raid]); } static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); static ssize_t cciss_show_usage_count(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); unsigned long flags; int count; spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); if (h->busy_configuring) { spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return -EBUSY; } count = drv->usage_count; spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); return snprintf(buf, 20, "%d\n", count); } static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); static struct attribute *cciss_host_attrs[] = { &dev_attr_rescan.attr, NULL }; static struct attribute_group cciss_host_attr_group = { .attrs = cciss_host_attrs, }; static const struct attribute_group *cciss_host_attr_groups[] = { &cciss_host_attr_group, NULL }; static struct device_type cciss_host_type = { .name = "cciss_host", .groups = cciss_host_attr_groups, .release = cciss_hba_release, }; static struct attribute *cciss_dev_attrs[] = { &dev_attr_unique_id.attr, &dev_attr_model.attr, &dev_attr_vendor.attr, &dev_attr_rev.attr, 
&dev_attr_lunid.attr, &dev_attr_raid_level.attr, &dev_attr_usage_count.attr, NULL }; static struct attribute_group cciss_dev_attr_group = { .attrs = cciss_dev_attrs, }; static const struct attribute_group *cciss_dev_attr_groups[] = { &cciss_dev_attr_group, NULL }; static struct device_type cciss_dev_type = { .name = "cciss_device", .groups = cciss_dev_attr_groups, .release = cciss_device_release, }; static struct bus_type cciss_bus_type = { .name = "cciss", }; /* * cciss_hba_release is called when the reference count * of h->dev goes to zero. */ static void cciss_hba_release(struct device *dev) { /* * nothing to do, but need this to avoid a warning * about not having a release handler from lib/kref.c. */ } /* * Initialize sysfs entry for each controller. This sets up and registers * the 'cciss#' directory for each individual controller under * /sys/bus/pci/devices/<dev>/. */ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) { device_initialize(&h->dev); h->dev.type = &cciss_host_type; h->dev.bus = &cciss_bus_type; dev_set_name(&h->dev, "%s", h->devname); h->dev.parent = &h->pdev->dev; return device_add(&h->dev); } /* * Remove sysfs entries for an hba. */ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) { device_del(&h->dev); put_device(&h->dev); /* final put. */ } /* cciss_device_release is called when the reference count * of h->drv[x]dev goes to zero. */ static void cciss_device_release(struct device *dev) { drive_info_struct *drv = to_drv(dev); kfree(drv); } /* * Initialize sysfs for each logical drive. This sets up and registers * the 'c#d#' directory for each individual logical drive under * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from * /sys/block/cciss!c#d# to this entry. 
*/ static long cciss_create_ld_sysfs_entry(struct ctlr_info *h, int drv_index) { struct device *dev; if (h->drv[drv_index]->device_initialized) return 0; dev = &h->drv[drv_index]->dev; device_initialize(dev); dev->type = &cciss_dev_type; dev->bus = &cciss_bus_type; dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); dev->parent = &h->dev; h->drv[drv_index]->device_initialized = 1; return device_add(dev); } /* * Remove sysfs entries for a logical drive. */ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, int ctlr_exiting) { struct device *dev = &h->drv[drv_index]->dev; /* special case for c*d0, we only destroy it on controller exit */ if (drv_index == 0 && !ctlr_exiting) return; device_del(dev); put_device(dev); /* the "final" put. */ h->drv[drv_index] = NULL; } /* * For operations that cannot sleep, a command block is allocated at init, * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track * which ones are free or in use. For operations that can wait for kmalloc * to possible sleep, this routine can be called with get_from_pool set to 0. * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was. 
*/ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool) { CommandList_struct *c; int i; u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; if (!get_from_pool) { c = (CommandList_struct *) pci_alloc_consistent(h->pdev, sizeof(CommandList_struct), &cmd_dma_handle); if (c == NULL) return NULL; memset(c, 0, sizeof(CommandList_struct)); c->cmdindex = -1; c->err_info = (ErrorInfo_struct *) pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct), &err_dma_handle); if (c->err_info == NULL) { pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, cmd_dma_handle); return NULL; } memset(c->err_info, 0, sizeof(ErrorInfo_struct)); } else { /* get it out of the controllers pool */ do { i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); if (i == h->nr_cmds) return NULL; } while (test_and_set_bit (i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); #ifdef CCISS_DEBUG printk(KERN_DEBUG "cciss: using command buffer %d\n", i); #endif c = h->cmd_pool + i; memset(c, 0, sizeof(CommandList_struct)); cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct); c->err_info = h->errinfo_pool + i; memset(c->err_info, 0, sizeof(ErrorInfo_struct)); err_dma_handle = h->errinfo_pool_dhandle + i * sizeof(ErrorInfo_struct); h->nr_allocs++; c->cmdindex = i; } INIT_HLIST_NODE(&c->list); c->busaddr = (__u32) cmd_dma_handle; temp64.val = (__u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; c->ErrDesc.Addr.upper = temp64.val32.upper; c->ErrDesc.Len = sizeof(ErrorInfo_struct); c->ctlr = h->ctlr; return c; } /* * Frees a command block that was previously allocated with cmd_alloc(). 
*/ static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool) { int i; u64bit temp64; if (!got_from_pool) { temp64.val32.lower = c->ErrDesc.Addr.lower; temp64.val32.upper = c->ErrDesc.Addr.upper; pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), c->err_info, (dma_addr_t) temp64.val); pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, (dma_addr_t) c->busaddr); } else { i = c - h->cmd_pool; clear_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); h->nr_frees++; } } static inline ctlr_info_t *get_host(struct gendisk *disk) { return disk->queue->queuedata; } static inline drive_info_struct *get_drv(struct gendisk *disk) { return disk->private_data; } /* * Open. Make sure the device is really there. */ static int cciss_open(struct block_device *bdev, fmode_t mode) { ctlr_info_t *host = get_host(bdev->bd_disk); drive_info_struct *drv = get_drv(bdev->bd_disk); #ifdef CCISS_DEBUG printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); #endif /* CCISS_DEBUG */ if (drv->busy_configuring) return -EBUSY; /* * Root is allowed to open raw volume zero even if it's not configured * so array config can still work. Root is also allowed to open any * volume that has a LUN ID, so it can issue IOCTL to reread the * disk information. I don't think I really like this * but I'm already using way to many device nodes to claim another one * for "raw controller". */ if (drv->heads == 0) { if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */ /* if not node 0 make sure it is a partition = 0 */ if (MINOR(bdev->bd_dev) & 0x0f) { return -ENXIO; /* if it is, make sure we have a LUN ID */ } else if (memcmp(drv->LunID, CTLR_LUNID, sizeof(drv->LunID))) { return -ENXIO; } } if (!capable(CAP_SYS_ADMIN)) return -EPERM; } drv->usage_count++; host->usage_count++; return 0; } /* * Close. Sync first. 
*/ static int cciss_release(struct gendisk *disk, fmode_t mode) { ctlr_info_t *host = get_host(disk); drive_info_struct *drv = get_drv(disk); #ifdef CCISS_DEBUG printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name); #endif /* CCISS_DEBUG */ drv->usage_count--; host->usage_count--; return 0; } #ifdef CONFIG_COMPAT static int do_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { int ret; lock_kernel(); ret = cciss_ioctl(bdev, mode, cmd, arg); unlock_kernel(); return ret; } static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg); static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg); static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { switch (cmd) { case CCISS_GETPCIINFO: case CCISS_GETINTINFO: case CCISS_SETINTINFO: case CCISS_GETNODENAME: case CCISS_SETNODENAME: case CCISS_GETHEARTBEAT: case CCISS_GETBUSTYPES: case CCISS_GETFIRMVER: case CCISS_GETDRIVVER: case CCISS_REVALIDVOLS: case CCISS_DEREGDISK: case CCISS_REGNEWDISK: case CCISS_REGNEWD: case CCISS_RESCANDISK: case CCISS_GETLUNINFO: return do_ioctl(bdev, mode, cmd, arg); case CCISS_PASSTHRU32: return cciss_ioctl32_passthru(bdev, mode, cmd, arg); case CCISS_BIG_PASSTHRU32: return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg); default: return -ENOIOCTLCMD; } } static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { IOCTL32_Command_struct __user *arg32 = (IOCTL32_Command_struct __user *) arg; IOCTL_Command_struct arg64; IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); 
err |= get_user(arg64.buf_size, &arg32->buf_size); err |= get_user(cp, &arg32->buf); arg64.buf = compat_ptr(cp); err |= copy_to_user(p, &arg64, sizeof(arg64)); if (err) return -EFAULT; err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); if (err) return -EFAULT; return err; } static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { BIG_IOCTL32_Command_struct __user *arg32 = (BIG_IOCTL32_Command_struct __user *) arg; BIG_IOCTL_Command_struct arg64; BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); err |= get_user(arg64.buf_size, &arg32->buf_size); err |= get_user(arg64.malloc_size, &arg32->malloc_size); err |= get_user(cp, &arg32->buf); arg64.buf = compat_ptr(cp); err |= copy_to_user(p, &arg64, sizeof(arg64)); if (err) return -EFAULT; err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); if (err) return -EFAULT; return err; } #endif static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo) { drive_info_struct *drv = get_drv(bdev->bd_disk); if (!drv->cylinders) return -ENXIO; geo->heads = drv->heads; geo->sectors = drv->sectors; geo->cylinders = drv->cylinders; return 0; } static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c) { if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) (void)check_for_unit_attention(host, c); } /* * ioctl */ static int cciss_ioctl(struct block_device *bdev, 
fmode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; ctlr_info_t *host = get_host(disk); drive_info_struct *drv = get_drv(disk); int ctlr = host->ctlr; void __user *argp = (void __user *)arg; #ifdef CCISS_DEBUG printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg); #endif /* CCISS_DEBUG */ switch (cmd) { case CCISS_GETPCIINFO: { cciss_pci_info_struct pciinfo; if (!arg) return -EINVAL; pciinfo.domain = pci_domain_nr(host->pdev->bus); pciinfo.bus = host->pdev->bus->number; pciinfo.dev_fn = host->pdev->devfn; pciinfo.board_id = host->board_id; if (copy_to_user (argp, &pciinfo, sizeof(cciss_pci_info_struct))) return -EFAULT; return 0; } case CCISS_GETINTINFO: { cciss_coalint_struct intinfo; if (!arg) return -EINVAL; intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay); intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount); if (copy_to_user (argp, &intinfo, sizeof(cciss_coalint_struct))) return -EFAULT; return 0; } case CCISS_SETINTINFO: { cciss_coalint_struct intinfo; unsigned long flags; int i; if (!arg) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user (&intinfo, argp, sizeof(cciss_coalint_struct))) return -EFAULT; if ((intinfo.delay == 0) && (intinfo.count == 0)) { // printk("cciss_ioctl: delay and count cannot be 0\n"); return -EINVAL; } spin_lock_irqsave(CCISS_LOCK(ctlr), flags); /* Update the field, and then ring the doorbell */ writel(intinfo.delay, &(host->cfgtable->HostWrite.CoalIntDelay)); writel(intinfo.count, &(host->cfgtable->HostWrite.CoalIntCount)); writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { if (!(readl(host->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; /* delay and try again */ udelay(1000); } spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); if (i >= MAX_IOCTL_CONFIG_WAIT) return -EAGAIN; return 0; } case CCISS_GETNODENAME: { NodeName_type NodeName; int i; if (!arg) return -EINVAL; for (i = 0; 
i < 16; i++) NodeName[i] = readb(&host->cfgtable->ServerName[i]); if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) return -EFAULT; return 0; } case CCISS_SETNODENAME: { NodeName_type NodeName; unsigned long flags; int i; if (!arg) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user (NodeName, argp, sizeof(NodeName_type))) return -EFAULT; spin_lock_irqsave(CCISS_LOCK(ctlr), flags); /* Update the field, and then ring the doorbell */ for (i = 0; i < 16; i++) writeb(NodeName[i], &host->cfgtable->ServerName[i]); writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { if (!(readl(host->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; /* delay and try again */ udelay(1000); } spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); if (i >= MAX_IOCTL_CONFIG_WAIT) return -EAGAIN; return 0; } case CCISS_GETHEARTBEAT: { Heartbeat_type heartbeat; if (!arg) return -EINVAL; heartbeat = readl(&host->cfgtable->HeartBeat); if (copy_to_user (argp, &heartbeat, sizeof(Heartbeat_type))) return -EFAULT; return 0; } case CCISS_GETBUSTYPES: { BusTypes_type BusTypes; if (!arg) return -EINVAL; BusTypes = readl(&host->cfgtable->BusTypes); if (copy_to_user (argp, &BusTypes, sizeof(BusTypes_type))) return -EFAULT; return 0; } case CCISS_GETFIRMVER: { FirmwareVer_type firmware; if (!arg) return -EINVAL; memcpy(firmware, host->firm_ver, 4); if (copy_to_user (argp, firmware, sizeof(FirmwareVer_type))) return -EFAULT; return 0; } case CCISS_GETDRIVVER: { DriverVer_type DriverVer = DRIVER_VERSION; if (!arg) return -EINVAL; if (copy_to_user (argp, &DriverVer, sizeof(DriverVer_type))) return -EFAULT; return 0; } case CCISS_DEREGDISK: case CCISS_REGNEWD: case CCISS_REVALIDVOLS: return rebuild_lun_table(host, 0, 1); case CCISS_GETLUNINFO:{ LogvolInfo_struct luninfo; memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID)); luninfo.num_opens = drv->usage_count; luninfo.num_parts = 0; if (copy_to_user(argp, &luninfo, 
sizeof(LogvolInfo_struct))) return -EFAULT; return 0; } case CCISS_PASSTHRU: { IOCTL_Command_struct iocommand; CommandList_struct *c; char *buff = NULL; u64bit temp64; unsigned long flags; DECLARE_COMPLETION_ONSTACK(wait); if (!arg) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (copy_from_user (&iocommand, argp, sizeof(IOCTL_Command_struct))) return -EFAULT; if ((iocommand.buf_size < 1) && (iocommand.Request.Type.Direction != XFER_NONE)) { return -EINVAL; } #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */ /* Check kmalloc limits */ if (iocommand.buf_size > 128000) return -EINVAL; #endif if (iocommand.buf_size > 0) { buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) return -EFAULT; } if (iocommand.Request.Type.Direction == XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user (buff, iocommand.buf, iocommand.buf_size)) { kfree(buff); return -EFAULT; } } else { memset(buff, 0, iocommand.buf_size); } if ((c = cmd_alloc(host, 0)) == NULL) { kfree(buff); return -ENOMEM; } // Fill in the command type c->cmd_type = CMD_IOCTL_PEND; // Fill in Command Header c->Header.ReplyQueue = 0; // unused in simple mode if (iocommand.buf_size > 0) // buffer to fill { c->Header.SGList = 1; c->Header.SGTotal = 1; } else // no buffers to fill { c->Header.SGList = 0; c->Header.SGTotal = 0; } c->Header.LUN = iocommand.LUN_info; c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag // Fill in Request block c->Request = iocommand.Request; // Fill in the scatter gather information if (iocommand.buf_size > 0) { temp64.val = pci_map_single(host->pdev, buff, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); c->SG[0].Addr.lower = temp64.val32.lower; c->SG[0].Addr.upper = temp64.val32.upper; c->SG[0].Len = iocommand.buf_size; c->SG[0].Ext = 0; // we are not chaining } c->waiting = &wait; /* Put the request on the tail of the request queue */ spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 
addQ(&host->reqQ, c); host->Qdepth++; start_io(host); spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); wait_for_completion(&wait); /* unlock the buffers from DMA */ temp64.val32.lower = c->SG[0].Addr.lower; temp64.val32.upper = c->SG[0].Addr.upper; pci_unmap_single(host->pdev, (dma_addr_t) temp64.val, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(host, c); /* Copy the error information out */ iocommand.error_info = *(c->err_info); if (copy_to_user (argp, &iocommand, sizeof(IOCTL_Command_struct))) { kfree(buff); cmd_free(host, c, 0); return -EFAULT; } if (iocommand.Request.Type.Direction == XFER_READ) { /* Copy the data out of the buffer we created */ if (copy_to_user (iocommand.buf, buff, iocommand.buf_size)) { kfree(buff); cmd_free(host, c, 0); return -EFAULT; } } kfree(buff); cmd_free(host, c, 0); return 0; } case CCISS_BIG_PASSTHRU:{ BIG_IOCTL_Command_struct *ioc; CommandList_struct *c; unsigned char **buff = NULL; int *buff_size = NULL; u64bit temp64; unsigned long flags; BYTE sg_used = 0; int status = 0; int i; DECLARE_COMPLETION_ONSTACK(wait); __u32 left; __u32 sz; BYTE __user *data_ptr; if (!arg) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; ioc = (BIG_IOCTL_Command_struct *) kmalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) { status = -ENOMEM; goto cleanup1; } if (copy_from_user(ioc, argp, sizeof(*ioc))) { status = -EFAULT; goto cleanup1; } if ((ioc->buf_size < 1) && (ioc->Request.Type.Direction != XFER_NONE)) { status = -EINVAL; goto cleanup1; } /* Check kmalloc limits using all SGs */ if (ioc->malloc_size > MAX_KMALLOC_SIZE) { status = -EINVAL; goto cleanup1; } if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { status = -EINVAL; goto cleanup1; } buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; goto cleanup1; } buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); if (!buff_size) { status = -ENOMEM; goto cleanup1; } left = ioc->buf_size; data_ptr = ioc->buf; while (left) 
{ sz = (left > ioc->malloc_size) ? ioc-> malloc_size : left; buff_size[sg_used] = sz; buff[sg_used] = kmalloc(sz, GFP_KERNEL); if (buff[sg_used] == NULL) { status = -ENOMEM; goto cleanup1; } if (ioc->Request.Type.Direction == XFER_WRITE) { if (copy_from_user (buff[sg_used], data_ptr, sz)) { status = -EFAULT; goto cleanup1; } } else { memset(buff[sg_used], 0, sz); } left -= sz; data_ptr += sz; sg_used++; } if ((c = cmd_alloc(host, 0)) == NULL) { status = -ENOMEM; goto cleanup1; } c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; if (ioc->buf_size > 0) { c->Header.SGList = sg_used; c->Header.SGTotal = sg_used; } else { c->Header.SGList = 0; c->Header.SGTotal = 0; } c->Header.LUN = ioc->LUN_info; c->Header.Tag.lower = c->busaddr; c->Request = ioc->Request; if (ioc->buf_size > 0) { int i; for (i = 0; i < sg_used; i++) { temp64.val = pci_map_single(host->pdev, buff[i], buff_size[i], PCI_DMA_BIDIRECTIONAL); c->SG[i].Addr.lower = temp64.val32.lower; c->SG[i].Addr.upper = temp64.val32.upper; c->SG[i].Len = buff_size[i]; c->SG[i].Ext = 0; /* we are not chaining */ } } c->waiting = &wait; /* Put the request on the tail of the request queue */ spin_lock_irqsave(CCISS_LOCK(ctlr), flags); addQ(&host->reqQ, c); host->Qdepth++; start_io(host); spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); wait_for_completion(&wait); /* unlock the buffers from DMA */ for (i = 0; i < sg_used; i++) { temp64.val32.lower = c->SG[i].Addr.lower; temp64.val32.upper = c->SG[i].Addr.upper; pci_unmap_single(host->pdev, (dma_addr_t) temp64.val, buff_size[i], PCI_DMA_BIDIRECTIONAL); } check_ioctl_unit_attention(host, c); /* Copy the error information out */ ioc->error_info = *(c->err_info); if (copy_to_user(argp, ioc, sizeof(*ioc))) { cmd_free(host, c, 0); status = -EFAULT; goto cleanup1; } if (ioc->Request.Type.Direction == XFER_READ) { /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { if (copy_to_user (ptr, buff[i], buff_size[i])) { 
cmd_free(host, c, 0); status = -EFAULT; goto cleanup1; } ptr += buff_size[i]; } } cmd_free(host, c, 0); status = 0; cleanup1: if (buff) { for (i = 0; i < sg_used; i++) kfree(buff[i]); kfree(buff); } kfree(buff_size); kfree(ioc); return status; } /* scsi_cmd_blk_ioctl handles these, below, though some are not */ /* very meaningful for cciss. SG_IO is the main one people want. */ case SG_GET_VERSION_NUM: case SG_SET_TIMEOUT: case SG_GET_TIMEOUT: case SG_GET_RESERVED_SIZE: case SG_SET_RESERVED_SIZE: case SG_EMULATED_HOST: case SG_IO: case SCSI_IOCTL_SEND_COMMAND: return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); /* scsi_cmd_blk_ioctl would normally handle these, below, but */ /* they aren't a good fit for cciss, as CD-ROMs are */ /* not supported, and we don't have any bus/target/lun */ /* which we present to the kernel. */ case CDROM_SEND_PACKET: case CDROMCLOSETRAY: case CDROMEJECT: case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: default: return -ENOTTY; } } static void cciss_check_queues(ctlr_info_t *h) { int start_queue = h->next_to_run; int i; /* check to see if we have maxed out the number of commands that can * be placed on the queue. If so then exit. We do this check here * in case the interrupt we serviced was from an ioctl and did not * free any new commands. */ if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) return; /* We have room on the queue for more commands. Now we need to queue * them up. We will also keep track of the next queue to run so * that every queue gets a chance to be started first. */ for (i = 0; i < h->highest_lun + 1; i++) { int curr_queue = (start_queue + i) % (h->highest_lun + 1); /* make sure the disk has been added and the drive is real * because this can be called from the middle of init_one. 
 */
		if (!h->drv[curr_queue])
			continue;
		if (!(h->drv[curr_queue]->queue) || !(h->drv[curr_queue]->heads))
			continue;
		blk_start_queue(h->gendisk[curr_queue]->queue);

		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) ==
		    h->nr_cmds) {
			if (curr_queue == start_queue) {
				h->next_to_run =
				    (start_queue + 1) % (h->highest_lun + 1);
				break;
			} else {
				h->next_to_run = curr_queue;
				break;
			}
		}
	}
}

/* Completion bottom half for a finished request: unmap the scatter/gather
 * DMA mappings, complete the block-layer request, then (under the
 * controller lock) free the command and restart any queues that were
 * waiting for a free command slot. */
static void cciss_softirq_done(struct request *rq)
{
	CommandList_struct *cmd = rq->completion_data;
	ctlr_info_t *h = hba[cmd->ctlr];
	unsigned long flags;
	u64bit temp64;
	int i, ddir;

	if (cmd->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;

	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < cmd->Header.SGList; i++) {
		/* reassemble the 64-bit bus address from its two halves */
		temp64.val32.lower = cmd->SG[i].Addr.lower;
		temp64.val32.upper = cmd->SG[i].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
	}

#ifdef CCISS_DEBUG
	printk("Done with %p\n", rq);
#endif				/* CCISS_DEBUG */

	/* set the residual count for pc requests */
	if (blk_pc_request(rq))
		rq->resid_len = cmd->err_info->ResidualCnt;

	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);

	/* the command pool and queue state are protected by h->lock */
	spin_lock_irqsave(&h->lock, flags);
	cmd_free(h, cmd, 1);
	cciss_check_queues(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

/* Translate a logical drive number into the 8-byte SCSI-3 address the
 * controller expects, copied from the cached LunID of that drive. */
static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
	unsigned char scsi3addr[], uint32_t log_unit)
{
	memcpy(scsi3addr, h->drv[log_unit]->LunID,
		sizeof(h->drv[log_unit]->LunID));
}

/* This function gets the SCSI vendor, model, and revision of a logical drive
 * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
 * they cannot be read.
*/ static void cciss_get_device_descr(int ctlr, int logvol, int withirq, char *vendor, char *model, char *rev) { int rc; InquiryData_struct *inq_buf; unsigned char scsi3addr[8]; *vendor = '\0'; *model = '\0'; *rev = '\0'; inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); if (!inq_buf) return; log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); if (withirq) rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(InquiryData_struct), 0, scsi3addr, TYPE_CMD); else rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf, sizeof(InquiryData_struct), 0, scsi3addr, TYPE_CMD); if (rc == IO_OK) { memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); vendor[VENDOR_LEN] = '\0'; memcpy(model, &inq_buf->data_byte[16], MODEL_LEN); model[MODEL_LEN] = '\0'; memcpy(rev, &inq_buf->data_byte[32], REV_LEN); rev[REV_LEN] = '\0'; } kfree(inq_buf); return; } /* This function gets the serial number of a logical drive via * inquiry page 0x83. Serial no. is 16 bytes. If the serial * number cannot be had, for whatever reason, 16 bytes of 0xff * are returned instead. 
 */
static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
			unsigned char *serial_no, int buflen)
{
#define PAGE_83_INQ_BYTES 64
	int rc;
	unsigned char *buf;
	unsigned char scsi3addr[8];

	/* The serial number is at most 16 bytes; clamp the caller's buffer. */
	if (buflen > 16)
		buflen = 16;
	memset(serial_no, 0xff, buflen);
	buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
	if (!buf)
		return;
	/* NOTE(review): this overwrites the 0xff failure pattern set above,
	 * so an inquiry failure past this point yields zeroes rather than
	 * the 0xff bytes the function comment promises — confirm intent. */
	memset(serial_no, 0, buflen);
	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
	if (withirq)
		rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
			PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
	else
		rc = sendcmd(CISS_INQUIRY, ctlr, buf,
			PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
	if (rc == IO_OK)
		/* serial number data begins at byte 8 of the response */
		memcpy(serial_no, &buf[8], buflen);
	kfree(buf);
	return;
}

/*
 * cciss_add_disk sets up the block device queue for a logical drive
 */
static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
				int drv_index)
{
	disk->queue = blk_init_queue(do_cciss_request, &h->lock);
	if (!disk->queue)
		goto init_queue_failure;
	sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
	disk->major = h->major;
	disk->first_minor = drv_index << NWD_SHIFT;
	disk->fops = &cciss_fops;
	if (cciss_create_ld_sysfs_entry(h, drv_index))
		goto cleanup_queue;
	disk->private_data = h->drv[drv_index];
	disk->driverfs_dev = &h->drv[drv_index]->dev;

	/* Set up queue information */
	blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);

	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);

	/* This is a limit in the driver and could be eliminated. */
	blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);

	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);

	blk_queue_softirq_done(disk->queue, cciss_softirq_done);

	disk->queue->queuedata = h;

	blk_queue_logical_block_size(disk->queue,
				     h->drv[drv_index]->block_size);

	/* Make sure all queue data is written out before */
	/* setting h->drv[drv_index]->queue, as setting this */
	/* allows the interrupt handler to start the queue */
	wmb();
	h->drv[drv_index]->queue = disk->queue;
	add_disk(disk);
	return 0;

cleanup_queue:
	blk_cleanup_queue(disk->queue);
	disk->queue = NULL;
init_queue_failure:
	return -1;
}

/* This function will check the usage_count of the drive to be updated/added.
 * If the usage_count is zero and it is a heretofore unknown drive, or,
 * the drive's capacity, geometry, or serial number has changed,
 * then the drive information will be updated and the disk will be
 * re-registered with the kernel.  If these conditions don't hold,
 * then it will be left alone for the next reboot.  The exception to this
 * is disk 0 which will always be left registered with the kernel since it
 * is also the controller node.  Any changes to disk 0 will show up on
 * the next reboot.
 */
static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
	int via_ioctl)
{
	ctlr_info_t *h = hba[ctlr];
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	int ret = 0;
	drive_info_struct *drvinfo;

	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
	if (inq_buff == NULL || drvinfo == NULL)
		goto mem_msg;

	/* testing to see if 16-byte CDBs are already being used */
	if (h->cciss_read == CCISS_READ_16) {
		cciss_read_capacity_16(h->ctlr, drv_index, 1,
			&total_size, &block_size);
	} else {
		cciss_read_capacity(ctlr, drv_index, 1,
				    &total_size, &block_size);
		/* if read_capacity returns all F's this volume is >2TB */
		/* in size so we switch to 16-byte CDB's for all */
		/* read/write ops */
		if (total_size == 0xFFFFFFFFULL) {
			cciss_read_capacity_16(ctlr, drv_index, 1,
				&total_size, &block_size);
			h->cciss_read = CCISS_READ_16;
			h->cciss_write = CCISS_WRITE_16;
		} else {
			h->cciss_read = CCISS_READ_10;
			h->cciss_write = CCISS_WRITE_10;
		}
	}

	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
			       inq_buff, drvinfo);
	drvinfo->block_size = block_size;
	/* read capacity reports the last addressable block; +1 for a count */
	drvinfo->nr_blocks = total_size + 1;

	cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
				drvinfo->model, drvinfo->rev);
	cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
			sizeof(drvinfo->serial_no));
	/* Save the lunid in case we deregister the disk, below. */
	memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
		sizeof(drvinfo->LunID));

	/* Is it the same disk we already know, and nothing's changed? */
	if (h->drv[drv_index]->raid_level != -1 &&
		((memcmp(drvinfo->serial_no,
			h->drv[drv_index]->serial_no, 16) == 0) &&
		drvinfo->block_size == h->drv[drv_index]->block_size &&
		drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
		drvinfo->heads == h->drv[drv_index]->heads &&
		drvinfo->sectors == h->drv[drv_index]->sectors &&
		drvinfo->cylinders == h->drv[drv_index]->cylinders))
			/* The disk is unchanged, nothing to update */
			goto freeret;

	/* If we get here it's not the same disk, or something's changed,
	 * so we need to deregister it, and re-register it, if it's not
	 * in use.
	 * If the disk already exists then deregister it before proceeding
	 * (unless it's the first disk (for the controller node).
	 */
	if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
		printk(KERN_WARNING "disk %d has changed.\n", drv_index);
		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
		h->drv[drv_index]->busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);

		/* deregister_disk sets h->drv[drv_index]->queue = NULL
		 * which keeps the interrupt handler from starting
		 * the queue.
		 */
		ret = deregister_disk(h, drv_index, 0, via_ioctl);
	}

	/* If the disk is in use return */
	if (ret)
		goto freeret;

	/* Save the new information from cciss_geometry_inquiry
	 * and serial number inquiry.  If the disk was deregistered
	 * above, then h->drv[drv_index] will be NULL.
	 */
	if (h->drv[drv_index] == NULL) {
		drvinfo->device_initialized = 0;
		/* ownership of drvinfo transfers to h->drv[] here */
		h->drv[drv_index] = drvinfo;
		drvinfo = NULL; /* so it won't be freed below. */
	} else {
		/* special case for cxd0 */
		h->drv[drv_index]->block_size = drvinfo->block_size;
		h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
		h->drv[drv_index]->heads = drvinfo->heads;
		h->drv[drv_index]->sectors = drvinfo->sectors;
		h->drv[drv_index]->cylinders = drvinfo->cylinders;
		h->drv[drv_index]->raid_level = drvinfo->raid_level;
		memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
		memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
			VENDOR_LEN + 1);
		memcpy(h->drv[drv_index]->model, drvinfo->model,
			MODEL_LEN + 1);
		memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
	}

	++h->num_luns;
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index]->nr_blocks);

	/* If it's not disk 0 (drv_index != 0)
	 * or if it was disk 0, but there was previously
	 * no actual corresponding configured logical drive
	 * (raid_level == -1) then we want to update the
	 * logical drive's information.
	 */
	if (drv_index || first_time) {
		if (cciss_add_disk(h, disk, drv_index) != 0) {
			cciss_free_gendisk(h, drv_index);
			cciss_free_drive_info(h, drv_index);
			printk(KERN_WARNING "cciss:%d could not update "
				"disk %d\n", h->ctlr, drv_index);
			--h->num_luns;
		}
	}

freeret:
	kfree(inq_buff);
	kfree(drvinfo);
	return;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}

/* This function will find the first index of the controllers drive array
 * that has a null drv pointer and allocate the drive info struct and
 * will return that index.  This is where new drives will be added.
 * If the index to be returned is greater than the highest_lun index for
 * the controller then highest_lun is set to this new index.
 * If there are no available indexes or if the allocation fails, then -1
 * is returned.
 * "controller_node" is used to know if this is a real
 * logical drive, or just the controller node, which determines if this
 * counts towards highest_lun.
 */
static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
{
	int i;
	drive_info_struct *drv;

	/* Search for an empty slot for our drive info */
	for (i = 0; i < CISS_MAX_LUN; i++) {

		/* if not cxd0 case, and it's occupied, skip it. */
		if (h->drv[i] && i != 0)
			continue;
		/*
		 * If it's cxd0 case, and drv is alloc'ed already, and a
		 * disk is configured there, skip it.
		 */
		if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
			continue;

		/*
		 * We've found an empty slot.  Update highest_lun
		 * provided this isn't just the fake cxd0 controller node.
		 */
		if (i > h->highest_lun && !controller_node)
			h->highest_lun = i;

		/* If adding a real disk at cxd0, and it's already alloc'ed */
		if (i == 0 && h->drv[i] != NULL)
			return i;

		/*
		 * Found an empty slot, not already alloc'ed.  Allocate it.
		 * Mark it with raid_level == -1, so we know it's new later on.
		 */
		drv = kzalloc(sizeof(*drv), GFP_KERNEL);
		if (!drv)
			return -1;
		drv->raid_level = -1; /* so we know it's new */
		h->drv[i] = drv;
		return i;
	}
	return -1;
}

/* Free a slot's drive_info_struct and clear the pointer so the slot
 * reads as empty again. */
static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
{
	kfree(h->drv[drv_index]);
	h->drv[drv_index] = NULL;
}

/* Drop our reference on a slot's gendisk and clear the pointer so a
 * later add will allocate a fresh one. */
static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
{
	put_disk(h->gendisk[drv_index]);
	h->gendisk[drv_index] = NULL;
}

/* cciss_add_gendisk finds a free hba[]->drv structure
 * and allocates a gendisk if needed, and sets the lunid
 * in the drvinfo structure.  It returns the index into
 * the ->drv[] array, or -1 if none are free.
 * is_controller_node indicates whether highest_lun should
 * count this disk, or if it's only being added to provide
 * a means to talk to the controller in case no logical
 * drives have yet been configured.
 */
static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
	int controller_node)
{
	int drv_index;

	drv_index = cciss_alloc_drive_info(h, controller_node);
	if (drv_index == -1)
		return -1;

	/*Check if the gendisk needs to be allocated */
	if (!h->gendisk[drv_index]) {
		h->gendisk[drv_index] =
			alloc_disk(1 << NWD_SHIFT);
		if (!h->gendisk[drv_index]) {
			printk(KERN_ERR "cciss%d: could not "
				"allocate a new disk %d\n",
				h->ctlr, drv_index);
			goto err_free_drive_info;
		}
	}
	memcpy(h->drv[drv_index]->LunID, lunid,
		sizeof(h->drv[drv_index]->LunID));
	if (cciss_create_ld_sysfs_entry(h, drv_index))
		goto err_free_disk;
	/* Don't need to mark this busy because nobody */
	/* else knows about this disk yet to contend */
	/* for access to it. */
	h->drv[drv_index]->busy_configuring = 0;
	wmb();
	return drv_index;

err_free_disk:
	cciss_free_gendisk(h, drv_index);
err_free_drive_info:
	cciss_free_drive_info(h, drv_index);
	return -1;
}

/* This is for the special case of a controller which
 * has no logical drives.  In this case, we still need
 * to register a disk so the controller can be accessed
 * by the Array Config Utility.
 */
static void cciss_add_controller_node(ctlr_info_t *h)
{
	struct gendisk *disk;
	int drv_index;

	if (h->gendisk[0] != NULL)	/* already did this?  Then bail. */
		return;

	drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
	if (drv_index == -1)
		goto error;
	/* dummy geometry: the controller node carries no real media */
	h->drv[drv_index]->block_size = 512;
	h->drv[drv_index]->nr_blocks = 0;
	h->drv[drv_index]->heads = 0;
	h->drv[drv_index]->sectors = 0;
	h->drv[drv_index]->cylinders = 0;
	h->drv[drv_index]->raid_level = -1;
	memset(h->drv[drv_index]->serial_no, 0, 16);
	disk = h->gendisk[drv_index];
	if (cciss_add_disk(h, disk, drv_index) == 0)
		return;
	cciss_free_gendisk(h, drv_index);
	cciss_free_drive_info(h, drv_index);
error:
	printk(KERN_WARNING "cciss%d: could not "
		"add disk 0.\n", h->ctlr);
	return;
}

/* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistency of ordering
 * so that mount points are preserved until the next reboot.  This allows
 * for the removal of logical drives in the middle of the drive array
 * without a re-ordering of those drives.
 * INPUT
 * h = The controller to perform the operations on
 */
static int rebuild_lun_table(ctlr_info_t *h, int first_time,
	int via_ioctl)
{
	int ctlr = h->ctlr;
	int num_luns;
	ReportLunData_struct *ld_buff = NULL;
	int return_code;
	int listlength = 0;
	int i;
	int drv_found;
	int drv_index = 0;
	unsigned char lunid[8] = CTLR_LUNID;
	unsigned long flags;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* Set busy_configuring flag for this operation */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);

	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
	if (ld_buff == NULL)
		goto mem_msg;

	return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
				      sizeof(ReportLunData_struct),
				      0, CTLR_LUNID, TYPE_CMD);

	if (return_code == IO_OK)
		listlength =
			be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
	else {	/* reading number of logical volumes failed */
		printk(KERN_WARNING "cciss: report logical volume"
		       " command failed\n");
		listlength = 0;
		goto freeret;
	}

	num_luns = listlength / 8;	/* 8 bytes per entry */
	if (num_luns > CISS_MAX_LUN) {
		num_luns = CISS_MAX_LUN;
		printk(KERN_WARNING "cciss: more luns configured"
		       " on controller than can be handled by"
		       " this driver.\n");
	}

	if (num_luns == 0)
		cciss_add_controller_node(h);

	/* Compare controller drive array to driver's drive array
	 * to see if any drives are missing on the controller due
	 * to action of Array Config Utility (user deletes drive)
	 * and deregister logical drives which have disappeared.
	 */
	for (i = 0; i <= h->highest_lun; i++) {
		int j;
		drv_found = 0;

		/* skip holes in the array from already deleted drives */
		if (h->drv[i] == NULL)
			continue;

		for (j = 0; j < num_luns; j++) {
			memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
			if (memcmp(h->drv[i]->LunID, lunid,
				sizeof(lunid)) == 0) {
				drv_found = 1;
				break;
			}
		}
		if (!drv_found) {
			/* Deregister it from the OS, it's gone. */
			spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
			h->drv[i]->busy_configuring = 1;
			spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
			return_code = deregister_disk(h, i, 1, via_ioctl);
			if (h->drv[i] != NULL)
				h->drv[i]->busy_configuring = 0;
		}
	}

	/* Compare controller drive array to driver's drive array.
	 * Check for updates in the drive information and any new drives
	 * on the controller due to ACU adding logical drives, or changing
	 * a logical drive's size, etc.  Reregister any new/changed drives
	 */
	for (i = 0; i < num_luns; i++) {
		int j;

		drv_found = 0;

		memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
		/* Find if the LUN is already in the drive array
		 * of the driver.  If so then update its info
		 * if not in use.  If it does not exist then find
		 * the first free index and add it.
		 */
		for (j = 0; j <= h->highest_lun; j++) {
			if (h->drv[j] != NULL &&
				memcmp(h->drv[j]->LunID, lunid,
					sizeof(h->drv[j]->LunID)) == 0) {
				drv_index = j;
				drv_found = 1;
				break;
			}
		}

		/* check if the drive was found already in the array */
		if (!drv_found) {
			drv_index = cciss_add_gendisk(h, lunid, 0);
			if (drv_index == -1)
				goto freeret;
		}
		cciss_update_drive_info(ctlr, drv_index, first_time,
			via_ioctl);
	}		/* end for */

freeret:
	kfree(ld_buff);
	h->busy_configuring = 0;
	/* We return -1 here to tell the ACU that we have registered/updated
	 * all of the drives that we can and to keep it from calling us
	 * additional times.
	 */
	return -1;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	h->busy_configuring = 0;
	goto freeret;
}

/* Reset a drive's size/geometry/identity fields to "unconfigured",
 * preserving only its LunID. */
static void cciss_clear_drive_info(drive_info_struct *drive_info)
{
	/* zero out the disk size info */
	drive_info->nr_blocks = 0;
	drive_info->block_size = 0;
	drive_info->heads = 0;
	drive_info->sectors = 0;
	drive_info->cylinders = 0;
	drive_info->raid_level = -1;
	memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
	memset(drive_info->model, 0, sizeof(drive_info->model));
	memset(drive_info->rev, 0, sizeof(drive_info->rev));
	memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
	/*
	 * don't clear the LUNID though, we need to remember which
	 * one this one is.
	 */
}

/* This function will deregister the disk and it's queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structures busy_configuring flag set.  It's parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv  = This is the drive_info_struct associated with the disk to be
 *        deregistered.  It contains information about the disk used
 *        by the driver.
 * clear_all = This flag determines whether or not the disk information
 *             is going to be completely cleared out and the highest_lun
 *             reset.  Sometimes we want to clear out information about
 *             the disk in preparation for re-adding it.
 In this case
 * the highest_lun should be left unchanged and the LunID
 * should not be cleared.
 * via_ioctl
 *	This indicates whether we've reached this path via ioctl.
 *	This affects the maximum usage count allowed for c0d0 to be messed with.
 *	If this path is reached via ioctl(), then the max_usage_count will
 *	be 1, as the process calling ioctl() has got to have the device open.
 *	If we get here via sysfs, then the max usage count will be zero.
 */
static int deregister_disk(ctlr_info_t *h, int drv_index,
			   int clear_all, int via_ioctl)
{
	int i;
	struct gendisk *disk;
	drive_info_struct *drv;
	int recalculate_highest_lun;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	drv = h->drv[drv_index];
	disk = h->gendisk[drv_index];

	/* make sure logical volume is NOT in use */
	if (clear_all || (h->gendisk[0] == disk)) {
		if (drv->usage_count > via_ioctl)
			return -EBUSY;
	} else if (drv->usage_count > 0)
		return -EBUSY;

	recalculate_highest_lun = (drv == h->drv[h->highest_lun]);

	/* invalidate the devices and deregister the disk.  If it is disk
	 * zero do not deregister it but just zero out its values.  This
	 * allows us to delete disk zero but keep the controller registered.
	 */
	if (h->gendisk[0] != disk) {
		struct request_queue *q = disk->queue;
		if (disk->flags & GENHD_FL_UP) {
			cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
			del_gendisk(disk);
		}
		if (q)
			blk_cleanup_queue(q);
		/* If clear_all is set then we are deleting the logical
		 * drive, not just refreshing its info.  For drives
		 * other than disk 0 we will call put_disk.  We do not
		 * do this for disk 0 as we need it to be able to
		 * configure the controller.
		 */
		if (clear_all){
			/* This isn't pretty, but we need to find the
			 * disk in our array and NULL out the pointer.
			 * This is so that we will call alloc_disk if
			 * this index is used again later.
			 */
			for (i=0; i < CISS_MAX_LUN; i++){
				if (h->gendisk[i] == disk) {
					h->gendisk[i] = NULL;
					break;
				}
			}
			put_disk(disk);
		}
	} else {
		/* disk 0: keep the gendisk, just zero its size/info */
		set_capacity(disk, 0);
		cciss_clear_drive_info(drv);
	}

	--h->num_luns;

	/* if it was the last disk, find the new highest lun */
	if (clear_all && recalculate_highest_lun) {
		int i, newhighest = -1;
		for (i = 0; i <= h->highest_lun; i++) {
			/* if the disk has size > 0, it is available */
			if (h->drv[i] && h->drv[i]->heads)
				newhighest = i;
		}
		h->highest_lun = newhighest;
	}
	return 0;
}

/* Build a controller command (TYPE_CMD) or message (TYPE_MSG) into @c.
 * Returns IO_OK on success, IO_ERROR for an unrecognized cmd/cmd_type.
 * On success with size > 0, @buff is DMA-mapped into SG[0]; the caller
 * (sendcmd_core/sendcmd_withirq_core) is responsible for the unmap.
 */
static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
	size_t size, __u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	ctlr_info_t *h = hba[ctlr];
	u64bit buff_dma_handle;
	int status = IO_OK;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case CISS_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = CISS_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case CISS_REPORT_LOG:
		case CISS_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case CCISS_READ_CAPACITY:
			c->Request.CDBLen = 10;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_READ_CAPACITY_16:
			c->Request.CDBLen = 16;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 0x10;
			c->Request.CDB[10] = (size >> 24) & 0xFF;
			c->Request.CDB[11] = (size >> 16) & 0xFF;
			c->Request.CDB[12] = (size >> 8) & 0xFF;
			c->Request.CDB[13] = size & 0xFF;
			/* NOTE(review): the next two assignments duplicate
			 * lines above in this same case; harmless but
			 * redundant. */
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			/* NOTE(review): %c prints the opcode as a character;
			 * %02x was presumably intended — confirm before
			 * changing the message. */
			printk(KERN_WARNING
			       "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case 0:	/* ABORT message */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;	/* abort */
			c->Request.CDB[1] = 0;	/* abort a command */
			/* buff contains the tag of the command to abort */
			memcpy(&c->Request.CDB[4], buff, 8);
			break;
		case 1:	/* RESET message */
			c->Request.CDBLen = 16;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;	/* reset */
			c->Request.CDB[1] = 0x03;	/* reset a target */
			break;
		case 3:	/* No-Op message */
			c->Request.CDBLen = 1;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		default:
			printk(KERN_WARNING
			       "cciss%d: unknown message type %d\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else {
		printk(KERN_WARNING
		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
		return IO_ERROR;
	}
	/* Fill in the scatter gather information */
	if (size > 0) {
		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
							     buff, size,
							     PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
		c->SG[0].Len = size;
		c->SG[0].Ext = 0;	/* we are not chaining */
	}
	return status;
}

/* Translate the target (SCSI) status of a completed command into
 * IO_OK or IO_ERROR.  GOOD and benign CHECK CONDITION sense keys
 * (no sense / recovered error) are treated as success.
 */
static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
{
	switch (c->err_info->ScsiStatus) {
	case SAM_STAT_GOOD:
		return IO_OK;
	case SAM_STAT_CHECK_CONDITION:
		switch (0xf & c->err_info->SenseInfo[2]) {
		case 0: return IO_OK; /* no sense */
		case 1: return IO_OK; /* recovered error */
		default:
			printk(KERN_WARNING "cciss%d: cmd 0x%02x "
				"check condition, sense key = 0x%02x\n",
				h->ctlr, c->Request.CDB[0],
				c->err_info->SenseInfo[2]);
		}
		break;
	default:
		printk(KERN_WARNING "cciss%d: cmd 0x%02x"
			"scsi status = 0x%02x\n", h->ctlr,
			c->Request.CDB[0], c->err_info->ScsiStatus);
		break;
	}
	return IO_ERROR;
}

/* Map the controller's CommandStatus for a completed command onto the
 * driver's IO_OK / IO_ERROR / IO_NEEDS_RETRY codes, logging each case.
 */
static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
{
	int return_status = IO_OK;

	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return IO_OK;

	switch (c->err_info->CommandStatus) {
	case CMD_TARGET_STATUS:
		return_status = check_target_status(h, c);
		break;
	case CMD_DATA_UNDERRUN:
	case CMD_DATA_OVERRUN:
		/* expected for inquiry and report lun commands */
		break;
	case CMD_INVALID:
		printk(KERN_WARNING "cciss: cmd 0x%02x is "
		       "reported invalid\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_PROTOCOL_ERR:
		printk(KERN_WARNING "cciss: cmd 0x%02x has "
		       "protocol error \n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_HARDWARE_ERR:
		printk(KERN_WARNING "cciss: cmd 0x%02x had "
		       " hardware error\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_CONNECTION_LOST:
		printk(KERN_WARNING "cciss: cmd 0x%02x had "
		       "connection lost\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_ABORTED:
		printk(KERN_WARNING "cciss: cmd 0x%02x was "
		       "aborted\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_ABORT_FAILED:
		printk(KERN_WARNING "cciss: cmd 0x%02x reports "
		       "abort failed\n", c->Request.CDB[0]);
		return_status = IO_ERROR;
		break;
	case CMD_UNSOLICITED_ABORT:
		/* the only status that asks the caller to retry */
		printk(KERN_WARNING
		       "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
		       c->Request.CDB[0]);
		return_status = IO_NEEDS_RETRY;
		break;
	default:
		printk(KERN_WARNING "cciss: cmd 0x%02x returned "
		       "unknown status %x\n", c->Request.CDB[0],
		       c->err_info->CommandStatus);
		return_status = IO_ERROR;
	}
	return return_status;
}

/* Queue command @c, sleep on a completion until the IRQ handler wakes
 * us, and (if @attempt_retry) retry unsolicited aborts up to
 * MAX_CMD_RETRIES.  Always unmaps the SG[0] buffer that fill_cmd
 * mapped before returning.
 */
static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
	int attempt_retry)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	u64bit buff_dma_handle;
	unsigned long flags;
	int return_status = IO_OK;

resend_cmd2:
	c->waiting = &wait;
	/* Put the request on the tail of the queue and send it */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);

	wait_for_completion(&wait);

	if (c->err_info->CommandStatus == 0 || !attempt_retry)
		goto command_done;

	return_status = process_sendcmd_error(h, c);

	if (return_status == IO_NEEDS_RETRY &&
	    c->retry_count < MAX_CMD_RETRIES) {
		printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
			c->Request.CDB[0]);
		c->retry_count++;
		/* erase the old error information */
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		return_status = IO_OK;
		INIT_COMPLETION(wait);
		goto resend_cmd2;
	}

command_done:
	/* unlock the buffers from DMA */
	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
	return return_status;
}

/* Allocate a command, build it via fill_cmd, and run it through the
 * interrupt-driven path with retries enabled.  Returns the IO_* status
 * or -ENOMEM if no command block is available.
 */
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
	__u8 page_code, unsigned char scsi3addr[], int cmd_type)
{
	ctlr_info_t *h = hba[ctlr];
	CommandList_struct *c;
	int return_status;

	c = cmd_alloc(h, 0);
	if (!c)
		return -ENOMEM;
	return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
		scsi3addr, cmd_type);
	if (return_status == IO_OK)
		return_status = sendcmd_withirq_core(h, c, 1);
	cmd_free(h, c, 0);
	return return_status;
}

/* Issue a vendor-specific INQUIRY (page 0xC1) for a logical volume and
 * fill in @drv's CHS geometry, block size and block count.  Falls back
 * to a synthetic 255-head/32-sector geometry if the volume does not
 * support the geometry page.
 */
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size,
				   InquiryData_struct *inq_buff,
				   drive_info_struct *drv)
{
	int return_code;
	unsigned long t;
	unsigned char scsi3addr[8];

	memset(inq_buff, 0, sizeof(InquiryData_struct));
	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
	if (withirq)
		return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
					      inq_buff, sizeof(*inq_buff),
					      0xC1, scsi3addr, TYPE_CMD);
	else
		return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
				      sizeof(*inq_buff), 0xC1, scsi3addr,
				      TYPE_CMD);
	if (return_code == IO_OK) {
		if (inq_buff->data_byte[8] == 0xFF) {
			printk(KERN_WARNING
			       "cciss: reading geometry failed, volume "
			       "does not support reading geometry\n");
			drv->heads = 255;
			drv->sectors = 32;	/* Sectors per track */
			drv->cylinders = total_size + 1;
			drv->raid_level = RAID_UNKNOWN;
		} else {
			drv->heads = inq_buff->data_byte[6];
			drv->sectors = inq_buff->data_byte[7];
			drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
			drv->cylinders += inq_buff->data_byte[5];
			drv->raid_level = inq_buff->data_byte[8];
		}
		drv->block_size = block_size;
		drv->nr_blocks = total_size + 1;
		t = drv->heads * drv->sectors;
		if (t > 1) {
			/* recompute cylinders from total size, rounding up */
			sector_t real_size = total_size + 1;
			unsigned long rem = sector_div(real_size, t);
			if (rem)
				real_size++;
			drv->cylinders = real_size;
		}
	} else {		/* Get geometry failed */
		printk(KERN_WARNING "cciss: reading geometry failed\n");
	}
}

/* 10-byte READ CAPACITY: fetch total size (in blocks) and block size
 * for a logical volume.  On failure reports size 0 / BLOCK_SIZE.
 */
static void cciss_read_capacity(int ctlr, int logvol, int withirq,
				sector_t *total_size,
				unsigned int *block_size)
{
	ReadCapdata_struct *buf;
	int return_code;
	unsigned char scsi3addr[8];

	buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
	if (!buf) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return;
	}

	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
	if (withirq)
		return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
				ctlr, buf, sizeof(ReadCapdata_struct),
				0, scsi3addr, TYPE_CMD);
	else
		return_code = sendcmd(CCISS_READ_CAPACITY,
				ctlr, buf, sizeof(ReadCapdata_struct),
				0, scsi3addr, TYPE_CMD);
	if (return_code == IO_OK) {
		*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
	} else {		/* read capacity command failed */
		printk(KERN_WARNING "cciss: read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	kfree(buf);
}

/* 16-byte READ CAPACITY variant for volumes larger than 2 TB. */
static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
				   sector_t *total_size,
				   unsigned int *block_size)
{
	ReadCapdata_struct_16 *buf;
	int return_code;
	unsigned char scsi3addr[8];

	buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
	if (!buf) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return;
	}

	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
	if (withirq) {
		return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
			ctlr, buf, sizeof(ReadCapdata_struct_16),
			0, scsi3addr, TYPE_CMD);
	} else {
		return_code = sendcmd(CCISS_READ_CAPACITY_16,
			ctlr, buf, sizeof(ReadCapdata_struct_16),
			0, scsi3addr, TYPE_CMD);
	}
	if (return_code == IO_OK) {
		*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
	} else {		/* read capacity command failed */
		printk(KERN_WARNING "cciss: read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	printk(KERN_INFO "      blocks= %llu block_size= %d\n",
	       (unsigned long long)*total_size+1, *block_size);
	kfree(buf);
}

/* (cciss_revalidate's definition continues on the following lines) */
static
int cciss_revalidate(struct gendisk *disk)
{
	ctlr_info_t *h = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int logvol;
	int FOUND = 0;
	unsigned int block_size;
	sector_t total_size;
	InquiryData_struct *inq_buff = NULL;

	/* locate the logical volume index matching this gendisk's LunID */
	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
			sizeof(drv->LunID)) == 0) {
			FOUND = 1;
			break;
		}
	}

	if (!FOUND)
		return 1;

	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return 1;
	}
	/* choose 10- or 16-byte READ CAPACITY based on the controller's
	 * configured read opcode */
	if (h->cciss_read == CCISS_READ_10) {
		cciss_read_capacity(h->ctlr, logvol, 1,
					&total_size, &block_size);
	} else {
		cciss_read_capacity_16(h->ctlr, logvol, 1,
					&total_size, &block_size);
	}
	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
			       inq_buff, drv);

	blk_queue_logical_block_size(drv->queue, drv->block_size);
	set_capacity(disk, drv->nr_blocks);

	kfree(inq_buff);
	return 0;
}

/*
 * Wait polling for a command to complete.
 * The memory mapped FIFO is polled for the completion.
 * Used only at init time, interrupts from the HBA are disabled.
 * Returns the completed command's tag, or 1 on timeout (1 can never
 * be a valid command address, so callers use it as the timeout marker).
 */
static unsigned long pollcomplete(int ctlr)
{
	unsigned long done;
	int i;

	/* Wait (up to 20 seconds) for a command to complete */

	for (i = 20 * HZ; i > 0; i--) {
		done = hba[ctlr]->access.command_completed(hba[ctlr]);
		if (done == FIFO_EMPTY)
			schedule_timeout_uninterruptible(1);
		else
			return done;
	}
	/* Invalid address to tell caller we ran out of time */
	return 1;
}

/* Send command c to controller h and poll for it to complete.
 * Turns interrupts off on the board.  Used at driver init time
 * and during SCSI error recovery.
 */
static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
{
	int i;
	unsigned long complete;
	int status = IO_ERROR;
	u64bit buff_dma_handle;

resend_cmd1:

	/* Disable interrupt on the board. */
	h->access.set_intr_mask(h, CCISS_INTR_OFF);

	/* Make sure there is room in the command FIFO */
	/* Actually it should be completely empty at this time */
	/* unless we are in here doing error handling for the scsi */
	/* tape side of the driver. */
	for (i = 200000; i > 0; i--) {
		/* if fifo isn't full go */
		if (!(h->access.fifo_full(h)))
			break;
		udelay(10);
		printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
		       " waiting!\n", h->ctlr);
	}

	h->access.submit_command(h, c);	/* Send the cmd */
	do {
		complete = pollcomplete(h->ctlr);

#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: command completed\n");
#endif				/* CCISS_DEBUG */

		if (complete == 1) {
			printk(KERN_WARNING
			       "cciss cciss%d: SendCmd Timeout out, "
			       "No command list address returned!\n", h->ctlr);
			status = IO_ERROR;
			break;
		}

		/* Make sure it's the command we're expecting. */
		if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
			printk(KERN_WARNING "cciss%d: Unexpected command "
			       "completion.\n", h->ctlr);
			continue;
		}

		/* It is our command.  If no error, we're done. */
		if (!(complete & CISS_ERROR_BIT)) {
			status = IO_OK;
			break;
		}

		/* There is an error... */

		/* if data overrun or underun on Report command ignore it */
		if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
		     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
		     (c->Request.CDB[0] == CISS_INQUIRY)) &&
		    ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
		     (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
			complete = c->busaddr;
			status = IO_OK;
			break;
		}

		if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
			printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
			       h->ctlr, c);
			if (c->retry_count < MAX_CMD_RETRIES) {
				printk(KERN_WARNING "cciss%d: retrying %p\n",
				       h->ctlr, c);
				c->retry_count++;
				/* erase the old error information */
				/* NOTE(review): sizeof(c->err_info) is the
				 * size of the POINTER, not the struct; the
				 * IRQ path (resend_cciss_cmd) uses
				 * sizeof(ErrorInfo_struct) — likely this
				 * should be sizeof(*c->err_info). */
				memset(c->err_info, 0, sizeof(c->err_info));
				goto resend_cmd1;
			}
			printk(KERN_WARNING "cciss%d: retried %p too many "
			       "times\n", h->ctlr, c);
			status = IO_ERROR;
			break;
		}

		if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
			printk(KERN_WARNING "cciss%d: command could not be "
			       "aborted.\n", h->ctlr);
			status = IO_ERROR;
			break;
		}

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
			status = check_target_status(h, c);
			break;
		}

		printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
		printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
		       c->Request.CDB[0], c->err_info->CommandStatus);
		status = IO_ERROR;
		break;

	} while (1);

	/* unlock the data buffer from DMA */
	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
	return status;
}

/*
 * Send a command to the controller, and wait for it to complete.
 * Used at init time, and during SCSI error recovery.
 */
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
	__u8 page_code, unsigned char *scsi3addr, int cmd_type)
{
	CommandList_struct *c;
	int status;

	c = cmd_alloc(hba[ctlr], 1);
	if (!c) {
		printk(KERN_WARNING "cciss: unable to get memory");
		return IO_ERROR;
	}
	status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
		scsi3addr, cmd_type);
	if (status == IO_OK)
		status = sendcmd_core(hba[ctlr], c);
	cmd_free(hba[ctlr], c, 1);
	return status;
}

/*
 * Map (physical) PCI mem into (virtual) kernel space.
 * Returns a pointer adjusted by the intra-page offset of @base,
 * or NULL if ioremap fails.
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap(page_base, page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}

/*
 * Takes jobs off the Q and sends them to the hardware, then puts them on
 * the Q to wait for completion.
 */
static void start_io(ctlr_info_t *h)
{
	CommandList_struct *c;

	while (!hlist_empty(&h->reqQ)) {
		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			printk(KERN_WARNING "cciss: fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Tell the controller execute command */
		h->access.submit_command(h, c);

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);
	}
}

/* Assumes that CCISS_LOCK(h->ctlr) is held.
 */
/* Zeros out the error record and then resends the command back */
/* to the controller */
static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
{
	/* erase the old error information */
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));

	/* add it to software queue and then send it to the controller */
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	start_io(h);
}

/* Pack the four per-layer status bytes into the block layer's combined
 * error word (inverse of the macros in scsi.h).
 */
static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
	unsigned int msg_byte, unsigned int host_byte,
	unsigned int driver_byte)
{
	/* inverse of macros in scsi.h */
	return (scsi_status_byte & 0xff) |
		((msg_byte & 0xff) << 8) |
		((host_byte & 0xff) << 16) |
		((driver_byte & 0xff) << 24);
}

/* Interpret a CMD_TARGET_STATUS completion: build the block-layer error
 * value, suppress benign sense keys for fs requests, retry on unit
 * attention, and copy sense data back for SG_IO-style requests.
 * Sets *retry_cmd when the command should be resent.
 */
static inline int evaluate_target_status(ctlr_info_t *h,
			CommandList_struct *cmd, int *retry_cmd)
{
	unsigned char sense_key;
	unsigned char status_byte, msg_byte, host_byte, driver_byte;
	int error_value;

	*retry_cmd = 0;
	/* If we get in here, it means we got "target status", that is, scsi status */
	status_byte = cmd->err_info->ScsiStatus;
	driver_byte = DRIVER_OK;
	msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */

	if (blk_pc_request(cmd->rq))
		host_byte = DID_PASSTHROUGH;
	else
		host_byte = DID_OK;

	error_value = make_status_bytes(status_byte, msg_byte,
		host_byte, driver_byte);

	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
		if (!blk_pc_request(cmd->rq))
			printk(KERN_WARNING "cciss: cmd %p "
			       "has SCSI Status 0x%x\n",
			       cmd, cmd->err_info->ScsiStatus);
		return error_value;
	}

	/* check the sense key */
	sense_key = 0xf & cmd->err_info->SenseInfo[2];
	/* no status or recovered error */
	if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
		error_value = 0;

	if (check_for_unit_attention(h, cmd)) {
		/* fs requests are retried by the driver; passthrough
		 * requests get the unit attention back */
		*retry_cmd = !blk_pc_request(cmd->rq);
		return 0;
	}

	if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
		if (error_value != 0)
			printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
			       " sense key = 0x%x\n", cmd, sense_key);
		return error_value;
	}

	/* SG_IO or similar, copy sense data back */
	if (cmd->rq->sense) {
		if (cmd->rq->sense_len > cmd->err_info->SenseLen)
			cmd->rq->sense_len = cmd->err_info->SenseLen;
		memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
			cmd->rq->sense_len);
	} else
		cmd->rq->sense_len = 0;

	return error_value;
}

/* checks the status of the job and calls complete buffers to mark all
 * buffers for the completed job. Note that this function does not need
 * to hold the hba/queue lock.
 */
static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
				    int timeout)
{
	int retry_cmd = 0;
	struct request *rq = cmd->rq;

	rq->errors = 0;

	if (timeout)
		rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);

	if (cmd->err_info->CommandStatus == 0)	/* no error has occurred */
		goto after_error_processing;

	switch (cmd->err_info->CommandStatus) {
	case CMD_TARGET_STATUS:
		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
		break;
	case CMD_DATA_UNDERRUN:
		if (blk_fs_request(cmd->rq)) {
			printk(KERN_WARNING "cciss: cmd %p has"
			       " completed with data underrun "
			       "reported\n", cmd);
			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
		}
		break;
	case CMD_DATA_OVERRUN:
		if (blk_fs_request(cmd->rq))
			printk(KERN_WARNING "cciss: cmd %p has"
			       " completed with data overrun "
			       "reported\n", cmd);
		break;
	case CMD_INVALID:
		printk(KERN_WARNING "cciss: cmd %p is "
		       "reported invalid\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_PROTOCOL_ERR:
		printk(KERN_WARNING "cciss: cmd %p has "
		       "protocol error \n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_HARDWARE_ERR:
		printk(KERN_WARNING "cciss: cmd %p had "
		       " hardware error\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_CONNECTION_LOST:
		printk(KERN_WARNING "cciss: cmd %p had "
		       "connection lost\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_ABORTED:
		printk(KERN_WARNING "cciss: cmd %p was "
		       "aborted\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
		break;
	case CMD_ABORT_FAILED:
		printk(KERN_WARNING "cciss: cmd %p reports "
		       "abort failed\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_UNSOLICITED_ABORT:
		printk(KERN_WARNING "cciss%d: unsolicited "
		       "abort %p\n", h->ctlr, cmd);
		if (cmd->retry_count < MAX_CMD_RETRIES) {
			retry_cmd = 1;
			printk(KERN_WARNING
			       "cciss%d: retrying %p\n", h->ctlr, cmd);
			cmd->retry_count++;
		} else
			printk(KERN_WARNING
			       "cciss%d: %p retried too "
			       "many times\n", h->ctlr, cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
		break;
	case CMD_TIMEOUT:
		printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	default:
		printk(KERN_WARNING "cciss: cmd %p returned "
		       "unknown status %x\n", cmd,
		       cmd->err_info->CommandStatus);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
	}

after_error_processing:

	/* We need to return this command */
	if (retry_cmd) {
		resend_cciss_cmd(h, cmd);
		return;
	}
	cmd->rq->completion_data = cmd;
	blk_complete_request(cmd->rq);
}

/*
 * Get a request and submit it to the controller.
 */
static void do_cciss_request(struct request_queue *q)
{
	ctlr_info_t *h = q->queuedata;
	CommandList_struct *c;
	sector_t start_blk;
	int seg;
	struct request *creq;
	u64bit temp64;
	struct scatterlist tmp_sg[MAXSGENTRIES];
	drive_info_struct *drv;
	int i, dir;

	/* We call start_io here in case there is a command waiting on the
	 * queue that has not been sent.
	 */
	if (blk_queue_plugged(q))
		goto startio;

      queue:
	creq = blk_peek_request(q);
	if (!creq)
		goto startio;

	BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);

	if ((c = cmd_alloc(h, 1)) == NULL)
		goto full;

	blk_start_request(creq);

	spin_unlock_irq(q->queue_lock);

	c->cmd_type = CMD_RWREQ;
	c->rq = creq;

	/* fill in the request */
	drv = creq->rq_disk->private_data;
	c->Header.ReplyQueue = 0;	/* unused in simple mode */
	/* got command from pool, so use the command block index instead */
	/* for direct lookups. */
	/* The first 2 bits are reserved for controller error reporting. */
	c->Header.Tag.lower = (c->cmdindex << 3);
	c->Header.Tag.lower |= 0x04;	/* flag for direct lookup. */
	memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet; */
	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
	c->Request.Type.Attribute = ATTR_SIMPLE;
	c->Request.Type.Direction =
	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
	c->Request.Timeout = 0;	/* Don't time out */
	c->Request.CDB[0] =
	    (rq_data_dir(creq) == READ) ?
	    h->cciss_read : h->cciss_write;
	start_blk = blk_rq_pos(creq);
#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
#endif				/* CCISS_DEBUG */

	sg_init_table(tmp_sg, MAXSGENTRIES);
	seg = blk_rq_map_sg(q, creq, tmp_sg);

	/* get the DMA records for the setup */
	if (c->Request.Type.Direction == XFER_READ)
		dir = PCI_DMA_FROMDEVICE;
	else
		dir = PCI_DMA_TODEVICE;

	for (i = 0; i < seg; i++) {
		c->SG[i].Len = tmp_sg[i].length;
		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
						  tmp_sg[i].offset,
						  tmp_sg[i].length, dir);
		c->SG[i].Addr.lower = temp64.val32.lower;
		c->SG[i].Addr.upper = temp64.val32.upper;
		c->SG[i].Ext = 0;	/* we are not chaining */
	}
	/* track how many SG entries we are using */
	if (seg > h->maxSG)
		h->maxSG = seg;

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
	       blk_rq_sectors(creq), seg);
#endif				/* CCISS_DEBUG */

	c->Header.SGList = c->Header.SGTotal = seg;
	if (likely(blk_fs_request(creq))) {
		if(h->cciss_read == CCISS_READ_10) {
			/* 10-byte READ/WRITE: 32-bit LBA, 16-bit count */
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
			c->Request.CDB[5] = start_blk & 0xff;
			c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
			c->Request.CDB[9] = c->Request.CDB[11] =
				c->Request.CDB[12] = 0;
		} else {
			/* 16-byte READ/WRITE: 64-bit LBA, 32-bit count */
			u32 upper32 = upper_32_bits(start_blk);

			c->Request.CDBLen = 16;
			c->Request.CDB[1]= 0;
			c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */
			c->Request.CDB[3]= (upper32 >> 16) & 0xff;
			c->Request.CDB[4]= (upper32 >> 8) & 0xff;
			c->Request.CDB[5]= upper32 & 0xff;
			c->Request.CDB[6]= (start_blk >> 24) & 0xff;
			c->Request.CDB[7]= (start_blk >> 16) & 0xff;
			c->Request.CDB[8]= (start_blk >> 8) & 0xff;
			c->Request.CDB[9]= start_blk & 0xff;
			c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
			c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
			c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
			c->Request.CDB[14] = c->Request.CDB[15] = 0;
		}
	} else if (blk_pc_request(creq)) {
		/* SG_IO-style passthrough: copy the caller's CDB verbatim */
		c->Request.CDBLen = creq->cmd_len;
		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
	} else {
		printk(KERN_WARNING "cciss%d: bad request type %d\n",
			h->ctlr, creq->cmd_type);
		BUG();
	}

	spin_lock_irq(q->queue_lock);

	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	goto queue;
full:
	blk_stop_queue(q);
startio:
	/* We will already have the driver lock here so not need
	 * to lock it.
	 */
	start_io(h);
}

static inline unsigned long get_next_completion(ctlr_info_t *h)
{
	return h->access.command_completed(h);
}

static inline int interrupt_pending(ctlr_info_t *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(ctlr_info_t *h)
{
	return (((h->access.intr_pending(h) == 0) ||
		 (h->interrupts_enabled == 0)));
}

/* Interrupt handler: drain the completion FIFO and complete each
 * command via the appropriate path (block request, ioctl completion,
 * or SCSI tape).
 */
static irqreturn_t do_cciss_intr(int irq, void *dev_id)
{
	ctlr_info_t *h = dev_id;
	CommandList_struct *c;
	unsigned long flags;
	__u32 a, a1, a2;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	/*
	 * If there are completed commands in the completion queue,
	 * we had better do something about it.
	 */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	while (interrupt_pending(h)) {
		while ((a = get_next_completion(h)) != FIFO_EMPTY) {
			a1 = a;
			if ((a & 0x04)) {
				/* direct-lookup tag: bits 3+ are the
				 * command pool index */
				a2 = (a >> 3);
				if (a2 >= h->nr_cmds) {
					printk(KERN_WARNING
					       "cciss: controller cciss%d failed, stopping.\n",
					       h->ctlr);
					/* NOTE(review): this early return
					 * leaves CCISS_LOCK held — verify
					 * fail_all_cmds() accounts for it. */
					fail_all_cmds(h->ctlr);
					return IRQ_HANDLED;
				}

				c = h->cmd_pool + a2;
				a = c->busaddr;

			} else {
				struct hlist_node *tmp;

				/* legacy tag: search the completion Q by
				 * bus address */
				a &= ~3;
				c = NULL;
				hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
					if (c->busaddr == a)
						break;
				}
			}
			/*
			 * If we've found the command, take it off the
			 * completion Q and free it
			 */
			if (c && c->busaddr == a) {
				removeQ(c);
				if (c->cmd_type == CMD_RWREQ) {
					complete_command(h, c, 0);
				} else if (c->cmd_type == CMD_IOCTL_PEND) {
					complete(c->waiting);
				}
#ifdef CONFIG_CISS_SCSI_TAPE
				else if (c->cmd_type == CMD_SCSI)
					complete_scsi_command(c, 0, a1);
#endif
				continue;
			}
		}
	}

	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
	return IRQ_HANDLED;
}

/**
 * add_to_scan_list() - add controller to rescan queue
 * @h:		      Pointer to the controller.
 *
 * Adds the controller to the rescan queue if not already on the queue.
 *
 * returns 1 if added to the queue, 0 if skipped (could be on the
 * queue already, or the controller could be initializing or shutting
 * down).
 **/
static int add_to_scan_list(struct ctlr_info *h)
{
	struct ctlr_info *test_h;
	int found = 0;
	int ret = 0;

	if (h->busy_initializing)
		return 0;

	if (!mutex_trylock(&h->busy_shutting_down))
		return 0;

	mutex_lock(&scan_mutex);
	list_for_each_entry(test_h, &scan_q, scan_list) {
		if (test_h == h) {
			found = 1;
			break;
		}
	}
	if (!found && !h->busy_scanning) {
		INIT_COMPLETION(h->scan_wait);
		list_add_tail(&h->scan_list, &scan_q);
		ret = 1;
	}
	mutex_unlock(&scan_mutex);
	mutex_unlock(&h->busy_shutting_down);

	return ret;
}

/**
 * remove_from_scan_list() - remove controller from rescan queue
 * @h:			   Pointer to the controller.
 *
 * Removes the controller from the rescan queue if present. Blocks if
 * the controller is currently conducting a rescan.
 **/
static void remove_from_scan_list(struct ctlr_info *h)
{
	struct ctlr_info *test_h, *tmp_h;
	int scanning = 0;

	mutex_lock(&scan_mutex);
	list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
		if (test_h == h) {
			list_del(&h->scan_list);
			complete_all(&h->scan_wait);
			mutex_unlock(&scan_mutex);
			return;
		}
	}
	/* NOTE(review): `&h->busy_scanning` takes the ADDRESS of the flag,
	 * which is always non-NULL, and then assigns 0 — so `scanning`
	 * can never become nonzero and the wait below is dead code.
	 * Likely intended: `if (h->busy_scanning) scanning = 1;`
	 * (matching the "Blocks if ... conducting a rescan" contract). */
	if (&h->busy_scanning)
		scanning = 0;
	mutex_unlock(&scan_mutex);

	if (scanning)
		wait_for_completion(&h->scan_wait);
}

/**
 * scan_thread() - kernel thread used to rescan controllers
 * @data:	 Ignored.
 *
 * A kernel thread used scan for drive topology changes on
 * controllers. The thread processes only one controller at a time
 * using a queue.  Controllers are added to the queue using
 * add_to_scan_list() and removed from the queue either after done
 * processing or using remove_from_scan_list().
 *
 * returns 0.
 **/
static int scan_thread(void *data)
{
	struct ctlr_info *h;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		if (kthread_should_stop())
			break;

		while (1) {
			mutex_lock(&scan_mutex);
			if (list_empty(&scan_q)) {
				mutex_unlock(&scan_mutex);
				break;
			}

			h = list_entry(scan_q.next,
				       struct ctlr_info,
				       scan_list);
			list_del(&h->scan_list);
			h->busy_scanning = 1;
			mutex_unlock(&scan_mutex);

			if (h) {
				rebuild_lun_table(h, 0, 0);
				complete_all(&h->scan_wait);
				mutex_lock(&scan_mutex);
				h->busy_scanning = 0;
				mutex_unlock(&scan_mutex);
			}
		}
	}

	return 0;
}

/* Returns 1 (command should be retried / attention noted) for every
 * recognized unit-attention ASC; 0 only when the sense key is not
 * UNIT_ATTENTION at all.  REPORT_LUNS_CHANGED also kicks the rescan
 * thread.
 */
static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		printk(KERN_WARNING "cciss%d: a state change "
			"detected, command retried\n", h->ctlr);
		return 1;
	break;
	case LUN_FAILED:
		printk(KERN_WARNING "cciss%d: LUN failure "
			"detected, action required\n", h->ctlr);
		return 1;
	break;
	case REPORT_LUNS_CHANGED:
		printk(KERN_WARNING "cciss%d: report LUN data "
			"changed\n", h->ctlr);
		add_to_scan_list(h);
		wake_up_process(cciss_scan_thread);
		return 1;
	break;
	case POWER_OR_RESET:
		printk(KERN_WARNING "cciss%d: a power on "
			"or device reset detected\n", h->ctlr);
		return 1;
	break;
	case UNIT_ATTENTION_CLEARED:
		printk(KERN_WARNING "cciss%d: unit attention "
		    "cleared by another initiator\n", h->ctlr);
		return 1;
	break;
	default:
		printk(KERN_WARNING "cciss%d: unknown "
			"unit attention detected\n", h->ctlr);
				return 1;
	}
}

/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
#ifdef CCISS_DEBUG
static void print_cfg_table(CfgTable_struct *tb)
{
	int i;
	char temp_name[17];

	printk("Controller Configuration information\n");
	printk("------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	printk("   Signature = %s\n", temp_name);
	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
	printk("   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	printk("   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	printk("   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	printk("   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	printk("   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	/* NOTE(review): "0x%d" mixes a hex prefix with a decimal
	 * conversion — debug-only, but misleading output. */
	printk("   Max outstanding commands = 0x%d\n",
	       readl(&(tb->CmdsOutMax)));
	printk("   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	printk("   Server Name = %s\n", temp_name);
	printk("   Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
}
#endif				/* CCISS_DEBUG */

/* Walk the device's BARs, accumulating the config-space offset each one
 * occupies, until the offset matches @pci_bar_addr.  Returns the
 * resource index + 1, or -1 if not found / invalid.
 */
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;
	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				printk(KERN_WARNING
				       "Base address is invalid\n");
				return -1;
				break;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */
static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
					   struct pci_dev *pdev, __u32 board_id)
{
#ifdef CONFIG_PCI_MSI
	int err;
	struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
	{0, 2}, {0, 3}
	};

	/* Some boards advertise MSI but don't really support it */
	if ((board_id == 0x40700E11) ||
	    (board_id == 0x40800E11) ||
	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
		goto default_int_mode;

	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
		err = pci_enable_msix(pdev, cciss_msix_entries, 4);
		if (!err) {
			c->intr[0] = cciss_msix_entries[0].vector;
			c->intr[1] = cciss_msix_entries[1].vector;
			c->intr[2] = cciss_msix_entries[2].vector;
			c->intr[3] = cciss_msix_entries[3].vector;
			c->msix_vector = 1;
			return;
		}
		if (err > 0) {
			printk(KERN_WARNING "cciss: only %d MSI-X vectors "
			       "available\n", err);
			goto default_int_mode;
		} else {
			printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
			       err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
		if (!pci_enable_msi(pdev)) {
			c->msi_vector = 1;
		} else {
			printk(KERN_WARNING "cciss: MSI init failed\n");
		}
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	c->intr[SIMPLE_MODE_INT] = pdev->irq;
	return;
}

static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
{
	ushort subsystem_vendor_id, subsystem_device_id, command;
	__u32 board_id, scratchpad = 0;
	__u64 cfg_offset;
	__u32 cfg_base_addr;
	__u64 cfg_base_addr_index;
	int i, prod_index, err;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id);

	for (i = 0; i < ARRAY_SIZE(products); i++) {
		/* Stand aside for hpsa driver on request */
		if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
			return -ENODEV;
		if (board_id == products[i].board_id)
			break;
	}
	prod_index = i;
	if (prod_index == ARRAY_SIZE(products)) {
		dev_warn(&pdev->dev,
			"unrecognized board ID: 0x%08lx, ignoring.\n",
			(unsigned long) board_id);
		return -ENODEV;
	}

	/* check to see if controller has been disabled */
	/* BEFORE trying to enable it */
	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
	if (!(command & 0x02)) {
		printk(KERN_WARNING
		       "cciss: controller appears to be disabled\n");
		return -ENODEV;
	}

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, "cciss");
	if (err) {
		printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
		       "aborting\n");
		return err;
	}

#ifdef CCISS_DEBUG
	printk("command = %x\n", command);
	printk("irq = %x\n", pdev->irq);
	printk("board_id = %x\n", board_id);
#endif				/* CCISS_DEBUG */

/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
 * else we use the IO-APIC interrupt assigned to us by system ROM.
*/ cciss_interrupt_mode(c, pdev, board_id); /* find the memory BAR */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) break; } if (i == DEVICE_COUNT_RESOURCE) { printk(KERN_WARNING "cciss: No memory BAR found\n"); err = -ENODEV; goto err_out_free_res; } c->paddr = pci_resource_start(pdev, i); /* addressing mode bits * already removed */ #ifdef CCISS_DEBUG printk("address 0 = %lx\n", c->paddr); #endif /* CCISS_DEBUG */ c->vaddr = remap_pci_mem(c->paddr, 0x250); /* Wait for the board to become ready. (PCI hotplug needs this.) * We poll for up to 120 secs, once per 100ms. */ for (i = 0; i < 1200; i++) { scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET); if (scratchpad == CCISS_FIRMWARE_READY) break; set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ } if (scratchpad != CCISS_FIRMWARE_READY) { printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); err = -ENODEV; goto err_out_free_res; } /* get the address index number */ cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET); cfg_base_addr &= (__u32) 0x0000ffff; #ifdef CCISS_DEBUG printk("cfg base address = %x\n", cfg_base_addr); #endif /* CCISS_DEBUG */ cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); #ifdef CCISS_DEBUG printk("cfg base address index = %llx\n", (unsigned long long)cfg_base_addr_index); #endif /* CCISS_DEBUG */ if (cfg_base_addr_index == -1) { printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); err = -ENODEV; goto err_out_free_res; } cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); #ifdef CCISS_DEBUG printk("cfg offset = %llx\n", (unsigned long long)cfg_offset); #endif /* CCISS_DEBUG */ c->cfgtable = remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset, sizeof(CfgTable_struct)); c->board_id = board_id; #ifdef CCISS_DEBUG print_cfg_table(c->cfgtable); #endif /* CCISS_DEBUG */ /* Some controllers support Zero Memory Raid (ZMR). 
* When configured in ZMR mode the number of supported * commands drops to 64. So instead of just setting an * arbitrary value we make the driver a little smarter. * We read the config table to tell us how many commands * are supported on the controller then subtract 4 to * leave a little room for ioctl calls. */ c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); c->product_name = products[prod_index].product_name; c->access = *(products[prod_index].access); c->nr_cmds = c->max_commands - 4; if ((readb(&c->cfgtable->Signature[0]) != 'C') || (readb(&c->cfgtable->Signature[1]) != 'I') || (readb(&c->cfgtable->Signature[2]) != 'S') || (readb(&c->cfgtable->Signature[3]) != 'S')) { printk("Does not appear to be a valid CISS config table\n"); err = -ENODEV; goto err_out_free_res; } #ifdef CONFIG_X86 { /* Need to enable prefetch in the SCSI core for 6400 in x86 */ __u32 prefetch; prefetch = readl(&(c->cfgtable->SCSI_Prefetch)); prefetch |= 0x100; writel(prefetch, &(c->cfgtable->SCSI_Prefetch)); } #endif /* Disabling DMA prefetch and refetch for the P600. * An ASIC bug may result in accesses to invalid memory addresses. * We've disabled prefetch for some time now. Testing with XEN * kernels revealed a bug in the refetch if dom0 resides on a P600. */ if(board_id == 0x3225103C) { __u32 dma_prefetch; __u32 dma_refetch; dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG); dma_prefetch |= 0x8000; writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG); pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch); dma_refetch |= 0x1; pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch); } #ifdef CCISS_DEBUG printk("Trying to put board into Simple mode\n"); #endif /* CCISS_DEBUG */ c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); /* Update the field, and then ring the doorbell */ writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL); /* under certain very rare conditions, this can take awhile. 
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) */ for (i = 0; i < MAX_CONFIG_WAIT; i++) { if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; /* delay and try again */ set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(1)); } #ifdef CCISS_DEBUG printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL)); #endif /* CCISS_DEBUG */ #ifdef CCISS_DEBUG print_cfg_table(c->cfgtable); #endif /* CCISS_DEBUG */ if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { printk(KERN_WARNING "cciss: unable to get board into" " simple mode\n"); err = -ENODEV; goto err_out_free_res; } return 0; err_out_free_res: /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_release_regions(pdev); return err; } /* Function to find the first free pointer into our hba[] array * Returns -1 if no free entries are left. */ static int alloc_cciss_hba(void) { int i; for (i = 0; i < MAX_CTLR; i++) { if (!hba[i]) { ctlr_info_t *p; p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); if (!p) goto Enomem; hba[i] = p; return i; } } printk(KERN_WARNING "cciss: This driver supports a maximum" " of %d controllers.\n", MAX_CTLR); return -1; Enomem: printk(KERN_ERR "cciss: out of memory.\n"); return -1; } static void free_hba(int n) { ctlr_info_t *h = hba[n]; int i; hba[n] = NULL; for (i = 0; i < h->highest_lun + 1; i++) if (h->gendisk[i] != NULL) put_disk(h->gendisk[i]); kfree(h); } /* Send a message CDB to the firmware. 
*/ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { typedef struct { CommandListHeader_struct CommandHeader; RequestBlock_struct Request; ErrDescriptor_struct ErrorDescriptor; } Command; static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct); Command *cmd; dma_addr_t paddr64; uint32_t paddr32, tag; void __iomem *vaddr; int i, err; vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (vaddr == NULL) return -ENOMEM; /* The Inbound Post Queue only accepts 32-bit physical addresses for the CCISS commands, so they must be allocated from the lower 4GiB of memory. */ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); return -ENOMEM; } cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); if (cmd == NULL) { iounmap(vaddr); return -ENOMEM; } /* This must fit, because of the 32-bit consistent DMA mask. Also, although there's no guarantee, we assume that the address is at least 4-byte aligned (most likely, it's page-aligned). 
*/ paddr32 = paddr64; cmd->CommandHeader.ReplyQueue = 0; cmd->CommandHeader.SGList = 0; cmd->CommandHeader.SGTotal = 0; cmd->CommandHeader.Tag.lower = paddr32; cmd->CommandHeader.Tag.upper = 0; memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); cmd->Request.CDBLen = 16; cmd->Request.Type.Type = TYPE_MSG; cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; cmd->Request.Type.Direction = XFER_NONE; cmd->Request.Timeout = 0; /* Don't time out */ cmd->Request.CDB[0] = opcode; cmd->Request.CDB[1] = type; memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */ cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command); cmd->ErrorDescriptor.Addr.upper = 0; cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct); writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); for (i = 0; i < 10; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); if ((tag & ~3) == paddr32) break; schedule_timeout_uninterruptible(HZ); } iounmap(vaddr); /* we leak the DMA buffer here ... no choice since the controller could still complete the command. */ if (i == 10) { printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n", opcode, type); return -ETIMEDOUT; } pci_free_consistent(pdev, cmd_sz, cmd, paddr64); if (tag & 2) { printk(KERN_ERR "cciss: controller message %02x:%02x failed\n", opcode, type); return -EIO; } printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n", opcode, type); return 0; } #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) #define cciss_noop(p) cciss_message(p, 3, 0) static __devinit int cciss_reset_msi(struct pci_dev *pdev) { /* the #defines are stolen from drivers/pci/msi.h. 
*/ #define msi_control_reg(base) (base + PCI_MSI_FLAGS) #define PCI_MSIX_FLAGS_ENABLE (1 << 15) int pos; u16 control = 0; pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); if (pos) { pci_read_config_word(pdev, msi_control_reg(pos), &control); if (control & PCI_MSI_FLAGS_ENABLE) { printk(KERN_INFO "cciss: resetting MSI\n"); pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); } } pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); if (pos) { pci_read_config_word(pdev, msi_control_reg(pos), &control); if (control & PCI_MSIX_FLAGS_ENABLE) { printk(KERN_INFO "cciss: resetting MSI-X\n"); pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); } } return 0; } /* This does a hard reset of the controller using PCI power management * states. */ static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev) { u16 pmcsr, saved_config_space[32]; int i, pos; printk(KERN_INFO "cciss: using PCI PM to reset controller\n"); /* This is very nearly the same thing as pci_save_state(pci_dev); pci_set_power_state(pci_dev, PCI_D3hot); pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); but we can't use these nice canned kernel routines on kexec, because they also check the MSI/MSI-X state in PCI configuration space and do the wrong thing when it is set/cleared. Also, the pci_save/restore_state functions violate the ordering requirements for restoring the configuration space from the CCISS document (see the comment below). So we roll our own .... */ for (i = 0; i < 32; i++) pci_read_config_word(pdev, 2*i, &saved_config_space[i]); pos = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pos == 0) { printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n"); return -ENODEV; } /* Quoting from the Open CISS Specification: "The Power * Management Control/Status Register (CSR) controls the power * state of the device. The normal operating state is D0, * CSR=00h. The software off state is D3, CSR=03h. 
To reset * the controller, place the interface device in D3 then to * D0, this causes a secondary PCI reset which will reset the * controller." */ /* enter the D3hot power management state */ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D3hot; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); schedule_timeout_uninterruptible(HZ >> 1); /* enter the D0 power management state */ pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D0; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); schedule_timeout_uninterruptible(HZ >> 1); /* Restore the PCI configuration space. The Open CISS * Specification says, "Restore the PCI Configuration * Registers, offsets 00h through 60h. It is important to * restore the command register, 16-bits at offset 04h, * last. Do not restore the configuration status register, * 16-bits at offset 06h." Note that the offset is 2*i. */ for (i = 0; i < 32; i++) { if (i == 2 || i == 3) continue; pci_write_config_word(pdev, 2*i, saved_config_space[i]); } wmb(); pci_write_config_word(pdev, 4, saved_config_space[2]); return 0; } /* * This is it. Find all the controllers and register them. I really hate * stealing all these major device numbers. * returns the number of block devices registered. */ static int __devinit cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int i; int j = 0; int rc; int dac, return_code; InquiryData_struct *inq_buff; if (reset_devices) { /* Reset the controller with a PCI power-cycle */ if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev)) return -ENODEV; /* Now try to get the controller to respond to a no-op. Some devices (notably the HP Smart Array 5i Controller) need up to 30 seconds to respond. 
*/ for (i=0; i<30; i++) { if (cciss_noop(pdev) == 0) break; schedule_timeout_uninterruptible(HZ); } if (i == 30) { printk(KERN_ERR "cciss: controller seems dead\n"); return -EBUSY; } } i = alloc_cciss_hba(); if (i < 0) return -1; hba[i]->busy_initializing = 1; INIT_HLIST_HEAD(&hba[i]->cmpQ); INIT_HLIST_HEAD(&hba[i]->reqQ); mutex_init(&hba[i]->busy_shutting_down); if (cciss_pci_init(hba[i], pdev) != 0) goto clean_no_release_regions; sprintf(hba[i]->devname, "cciss%d", i); hba[i]->ctlr = i; hba[i]->pdev = pdev; init_completion(&hba[i]->scan_wait); if (cciss_create_hba_sysfs_entry(hba[i])) goto clean0; /* configure PCI DMA stuff */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) dac = 1; else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) dac = 0; else { printk(KERN_ERR "cciss: no suitable DMA available\n"); goto clean1; } /* * register with the major number, or get a dynamic major number * by passing 0 as argument. This is done for greater than * 8 controller support. */ if (i < MAX_CTLR_ORIG) hba[i]->major = COMPAQ_CISS_MAJOR + i; rc = register_blkdev(hba[i]->major, hba[i]->devname); if (rc == -EBUSY || rc == -EINVAL) { printk(KERN_ERR "cciss: Unable to get major number %d for %s " "on hba %d\n", hba[i]->major, hba[i]->devname, i); goto clean1; } else { if (i >= MAX_CTLR_ORIG) hba[i]->major = rc; } /* make sure the board interrupts are off */ hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF); if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr, IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) { printk(KERN_ERR "cciss: Unable to get irq %d for %s\n", hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname); goto clean2; } printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", hba[i]->devname, pdev->device, pci_name(pdev), hba[i]->intr[SIMPLE_MODE_INT], dac ? 
"" : " not"); hba[i]->cmd_pool_bits = kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); hba[i]->cmd_pool = (CommandList_struct *) pci_alloc_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), &(hba[i]->cmd_pool_dhandle)); hba[i]->errinfo_pool = (ErrorInfo_struct *) pci_alloc_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), &(hba[i]->errinfo_pool_dhandle)); if ((hba[i]->cmd_pool_bits == NULL) || (hba[i]->cmd_pool == NULL) || (hba[i]->errinfo_pool == NULL)) { printk(KERN_ERR "cciss: out of memory"); goto clean4; } spin_lock_init(&hba[i]->lock); /* Initialize the pdev driver private data. have it point to hba[i]. */ pci_set_drvdata(pdev, hba[i]); /* command and error info recs zeroed out before they are used */ memset(hba[i]->cmd_pool_bits, 0, DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) * sizeof(unsigned long)); hba[i]->num_luns = 0; hba[i]->highest_lun = -1; for (j = 0; j < CISS_MAX_LUN; j++) { hba[i]->drv[j] = NULL; hba[i]->gendisk[j] = NULL; } cciss_scsi_setup(i); /* Turn the interrupts on so we can service requests */ hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); /* Get the firmware version */ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); if (inq_buff == NULL) { printk(KERN_ERR "cciss: out of memory\n"); goto clean4; } return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); if (return_code == IO_OK) { hba[i]->firm_ver[0] = inq_buff->data_byte[32]; hba[i]->firm_ver[1] = inq_buff->data_byte[33]; hba[i]->firm_ver[2] = inq_buff->data_byte[34]; hba[i]->firm_ver[3] = inq_buff->data_byte[35]; } else { /* send command failed */ printk(KERN_WARNING "cciss: unable to determine firmware" " version of controller\n"); } kfree(inq_buff); cciss_procinit(i); hba[i]->cciss_max_sectors = 2048; rebuild_lun_table(hba[i], 1, 0); hba[i]->busy_initializing = 0; return 1; clean4: kfree(hba[i]->cmd_pool_bits); if (hba[i]->cmd_pool) 
pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); if (hba[i]->errinfo_pool) pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); clean2: unregister_blkdev(hba[i]->major, hba[i]->devname); clean1: cciss_destroy_hba_sysfs_entry(hba[i]); clean0: pci_release_regions(pdev); clean_no_release_regions: hba[i]->busy_initializing = 0; /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_set_drvdata(pdev, NULL); free_hba(i); return -1; } static void cciss_shutdown(struct pci_dev *pdev) { ctlr_info_t *tmp_ptr; int i; char flush_buf[4]; int return_code; tmp_ptr = pci_get_drvdata(pdev); if (tmp_ptr == NULL) return; i = tmp_ptr->ctlr; if (hba[i] == NULL) return; /* Turn board interrupts off and send the flush cache command */ /* sendcmd will turn off interrupt, and send the flush... 
* To write all data in the battery backed cache to disks */ memset(flush_buf, 0, 4); return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, CTLR_LUNID, TYPE_CMD); if (return_code == IO_OK) { printk(KERN_INFO "Completed flushing cache on controller %d\n", i); } else { printk(KERN_WARNING "Error flushing cache on controller %d\n", i); } free_irq(hba[i]->intr[2], hba[i]); } static void __devexit cciss_remove_one(struct pci_dev *pdev) { ctlr_info_t *tmp_ptr; int i, j; if (pci_get_drvdata(pdev) == NULL) { printk(KERN_ERR "cciss: Unable to remove device \n"); return; } tmp_ptr = pci_get_drvdata(pdev); i = tmp_ptr->ctlr; if (hba[i] == NULL) { printk(KERN_ERR "cciss: device appears to " "already be removed \n"); return; } mutex_lock(&hba[i]->busy_shutting_down); remove_from_scan_list(hba[i]); remove_proc_entry(hba[i]->devname, proc_cciss); unregister_blkdev(hba[i]->major, hba[i]->devname); /* remove it from the disk list */ for (j = 0; j < CISS_MAX_LUN; j++) { struct gendisk *disk = hba[i]->gendisk[j]; if (disk) { struct request_queue *q = disk->queue; if (disk->flags & GENHD_FL_UP) { cciss_destroy_ld_sysfs_entry(hba[i], j, 1); del_gendisk(disk); } if (q) blk_cleanup_queue(q); } } #ifdef CONFIG_CISS_SCSI_TAPE cciss_unregister_scsi(i); /* unhook from SCSI subsystem */ #endif cciss_shutdown(pdev); #ifdef CONFIG_PCI_MSI if (hba[i]->msix_vector) pci_disable_msix(hba[i]->pdev); else if (hba[i]->msi_vector) pci_disable_msi(hba[i]->pdev); #endif /* CONFIG_PCI_MSI */ iounmap(hba[i]->vaddr); pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); kfree(hba[i]->cmd_pool_bits); /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); 
cciss_destroy_hba_sysfs_entry(hba[i]); mutex_unlock(&hba[i]->busy_shutting_down); free_hba(i); } static struct pci_driver cciss_pci_driver = { .name = "cciss", .probe = cciss_init_one, .remove = __devexit_p(cciss_remove_one), .id_table = cciss_pci_device_id, /* id_table */ .shutdown = cciss_shutdown, }; /* * This is it. Register the PCI driver information for the cards we control * the OS will call our registered routines when it finds one of our cards. */ static int __init cciss_init(void) { int err; /* * The hardware requires that commands are aligned on a 64-bit * boundary. Given that we use pci_alloc_consistent() to allocate an * array of them, the size must be a multiple of 8 bytes. */ BUILD_BUG_ON(sizeof(CommandList_struct) % 8); printk(KERN_INFO DRIVER_NAME "\n"); err = bus_register(&cciss_bus_type); if (err) return err; /* Start the scan thread */ cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); if (IS_ERR(cciss_scan_thread)) { err = PTR_ERR(cciss_scan_thread); goto err_bus_unregister; } /* Register for our PCI devices */ err = pci_register_driver(&cciss_pci_driver); if (err) goto err_thread_stop; return err; err_thread_stop: kthread_stop(cciss_scan_thread); err_bus_unregister: bus_unregister(&cciss_bus_type); return err; } static void __exit cciss_cleanup(void) { int i; pci_unregister_driver(&cciss_pci_driver); /* double check that all controller entrys have been removed */ for (i = 0; i < MAX_CTLR; i++) { if (hba[i] != NULL) { printk(KERN_WARNING "cciss: had to remove" " controller %d\n", i); cciss_remove_one(hba[i]->pdev); } } kthread_stop(cciss_scan_thread); remove_proc_entry("driver/cciss", NULL); bus_unregister(&cciss_bus_type); } static void fail_all_cmds(unsigned long ctlr) { /* If we get here, the board is apparently dead. */ ctlr_info_t *h = hba[ctlr]; CommandList_struct *c; unsigned long flags; printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr); h->alive = 0; /* the controller apparently died... 
*/ spin_lock_irqsave(CCISS_LOCK(ctlr), flags); pci_disable_device(h->pdev); /* Make sure it is really dead. */ /* move everything off the request queue onto the completed queue */ while (!hlist_empty(&h->reqQ)) { c = hlist_entry(h->reqQ.first, CommandList_struct, list); removeQ(c); h->Qdepth--; addQ(&h->cmpQ, c); } /* Now, fail everything on the completed queue with a HW error */ while (!hlist_empty(&h->cmpQ)) { c = hlist_entry(h->cmpQ.first, CommandList_struct, list); removeQ(c); if (c->cmd_type != CMD_MSG_STALE) c->err_info->CommandStatus = CMD_HARDWARE_ERR; if (c->cmd_type == CMD_RWREQ) { complete_command(h, c, 0); } else if (c->cmd_type == CMD_IOCTL_PEND) complete(c->waiting); #ifdef CONFIG_CISS_SCSI_TAPE else if (c->cmd_type == CMD_SCSI) complete_scsi_command(c, 0, 0); #endif } spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); return; } module_init(cciss_init); module_exit(cciss_cleanup);
gpl-2.0
mutoso-mirrors/linux-historical
arch/arm/mach-clps711x/irq.c
56
3205
/*
 *  linux/arch/arm/mach-clps711x/irq.c
 *
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <linux/list.h>

#include <asm/mach/irq.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>

#include <asm/hardware/clps7111.h>

/* Mask interrupt `irq' by clearing its bit in INTMR1. */
static void int1_mask(unsigned int irq)
{
	clps_writel(clps_readl(INTMR1) & ~(1 << irq), INTMR1);
}

/* Acknowledge interrupt `irq': mask it, then poke the matching
 * end-of-interrupt register for sources that need an explicit EOI. */
static void int1_ack(unsigned int irq)
{
	clps_writel(clps_readl(INTMR1) & ~(1 << irq), INTMR1);

	switch (irq) {
	case IRQ_CSINT:
		clps_writel(0, COEOI);
		break;
	case IRQ_TC1OI:
		clps_writel(0, TC1EOI);
		break;
	case IRQ_TC2OI:
		clps_writel(0, TC2EOI);
		break;
	case IRQ_RTCMI:
		clps_writel(0, RTCEOI);
		break;
	case IRQ_TINT:
		clps_writel(0, TEOI);
		break;
	case IRQ_UMSINT:
		clps_writel(0, UMSEOI);
		break;
	}
}

/* Unmask interrupt `irq' by setting its bit in INTMR1. */
static void int1_unmask(unsigned int irq)
{
	clps_writel(clps_readl(INTMR1) | (1 << irq), INTMR1);
}

static struct irqchip int1_chip = {
	.ack	= int1_ack,
	.mask	= int1_mask,
	.unmask	= int1_unmask,
};

/* INTMR2 handles IRQs 16 and up; bit position is irq - 16. */
static void int2_mask(unsigned int irq)
{
	clps_writel(clps_readl(INTMR2) & ~(1 << (irq - 16)), INTMR2);
}

static void int2_ack(unsigned int irq)
{
	clps_writel(clps_readl(INTMR2) & ~(1 << (irq - 16)), INTMR2);

	switch (irq) {
	case IRQ_KBDINT:
		clps_writel(0, KBDEOI);
		break;
	}
}

static void int2_unmask(unsigned int irq)
{
	clps_writel(clps_readl(INTMR2) | (1 << (irq - 16)), INTMR2);
}

static struct irqchip int2_chip = {
	.ack	= int2_ack,
	.mask	= int2_mask,
	.unmask	= int2_unmask,
};

/* Hook up the two interrupt controllers, mask everything, and clear any
 * interrupts left pending by the bootloader. */
void __init clps711x_init_irq(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		if (INT1_IRQS & (1 << irq)) {
			set_irq_handler(irq, do_level_IRQ);
			set_irq_chip(irq, &int1_chip);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
		if (INT2_IRQS & (1 << irq)) {
			set_irq_handler(irq, do_level_IRQ);
			set_irq_chip(irq, &int2_chip);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
	}

	/*
	 * Disable interrupts
	 */
	clps_writel(0, INTMR1);
	clps_writel(0, INTMR2);

	/*
	 * Clear down any pending interrupts
	 */
	clps_writel(0, COEOI);
	clps_writel(0, TC1EOI);
	clps_writel(0, TC2EOI);
	clps_writel(0, RTCEOI);
	clps_writel(0, TEOI);
	clps_writel(0, UMSEOI);
	clps_writel(0, SYNCIO);
	clps_writel(0, KBDEOI);
}
gpl-2.0
hoonir/iamroot_hypstudy_5th
drivers/power/power_supply_core.c
56
9559
/* * Universal power supply monitor class * * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> * Copyright © 2004 Szabolcs Gyurko * Copyright © 2003 Ian Molton <spyro@f2s.com> * * Modified: 2004, Oct Szabolcs Gyurko * * You may use this code as per GPL version 2 */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/err.h> #include <linux/power_supply.h> #include <linux/thermal.h> #include "power_supply.h" /* exported for the APM Power driver, APM emulation */ struct class *power_supply_class; EXPORT_SYMBOL_GPL(power_supply_class); static struct device_type power_supply_dev_type; static int __power_supply_changed_work(struct device *dev, void *data) { struct power_supply *psy = (struct power_supply *)data; struct power_supply *pst = dev_get_drvdata(dev); int i; for (i = 0; i < psy->num_supplicants; i++) if (!strcmp(psy->supplied_to[i], pst->name)) { if (pst->external_power_changed) pst->external_power_changed(pst); } return 0; } static void power_supply_changed_work(struct work_struct *work) { struct power_supply *psy = container_of(work, struct power_supply, changed_work); dev_dbg(psy->dev, "%s\n", __func__); class_for_each_device(power_supply_class, NULL, psy, __power_supply_changed_work); power_supply_update_leds(psy); kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE); } void power_supply_changed(struct power_supply *psy) { dev_dbg(psy->dev, "%s\n", __func__); schedule_work(&psy->changed_work); } EXPORT_SYMBOL_GPL(power_supply_changed); static int __power_supply_am_i_supplied(struct device *dev, void *data) { union power_supply_propval ret = {0,}; struct power_supply *psy = (struct power_supply *)data; struct power_supply *epsy = dev_get_drvdata(dev); int i; for (i = 0; i < epsy->num_supplicants; i++) { if (!strcmp(epsy->supplied_to[i], psy->name)) { if (epsy->get_property(epsy, POWER_SUPPLY_PROP_ONLINE, &ret)) continue; if (ret.intval) return ret.intval; } } return 0; } int 
power_supply_am_i_supplied(struct power_supply *psy) { int error; error = class_for_each_device(power_supply_class, NULL, psy, __power_supply_am_i_supplied); dev_dbg(psy->dev, "%s %d\n", __func__, error); return error; } EXPORT_SYMBOL_GPL(power_supply_am_i_supplied); static int __power_supply_is_system_supplied(struct device *dev, void *data) { union power_supply_propval ret = {0,}; struct power_supply *psy = dev_get_drvdata(dev); unsigned int *count = data; (*count)++; if (psy->type != POWER_SUPPLY_TYPE_BATTERY) { if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &ret)) return 0; if (ret.intval) return ret.intval; } return 0; } int power_supply_is_system_supplied(void) { int error; unsigned int count = 0; error = class_for_each_device(power_supply_class, NULL, &count, __power_supply_is_system_supplied); /* * If no power class device was found at all, most probably we are * running on a desktop system, so assume we are on mains power. */ if (count == 0) return 1; return error; } EXPORT_SYMBOL_GPL(power_supply_is_system_supplied); int power_supply_set_battery_charged(struct power_supply *psy) { if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) { psy->set_charged(psy); return 0; } return -EINVAL; } EXPORT_SYMBOL_GPL(power_supply_set_battery_charged); static int power_supply_match_device_by_name(struct device *dev, const void *data) { const char *name = data; struct power_supply *psy = dev_get_drvdata(dev); return strcmp(psy->name, name) == 0; } struct power_supply *power_supply_get_by_name(const char *name) { struct device *dev = class_find_device(power_supply_class, NULL, name, power_supply_match_device_by_name); return dev ? 
dev_get_drvdata(dev) : NULL; } EXPORT_SYMBOL_GPL(power_supply_get_by_name); int power_supply_powers(struct power_supply *psy, struct device *dev) { return sysfs_create_link(&psy->dev->kobj, &dev->kobj, "powers"); } EXPORT_SYMBOL_GPL(power_supply_powers); static void power_supply_dev_release(struct device *dev) { pr_debug("device: '%s': %s\n", dev_name(dev), __func__); kfree(dev); } #ifdef CONFIG_THERMAL static int power_supply_read_temp(struct thermal_zone_device *tzd, unsigned long *temp) { struct power_supply *psy; union power_supply_propval val; int ret; WARN_ON(tzd == NULL); psy = tzd->devdata; ret = psy->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val); /* Convert tenths of degree Celsius to milli degree Celsius. */ if (!ret) *temp = val.intval * 100; return ret; } static struct thermal_zone_device_ops psy_tzd_ops = { .get_temp = power_supply_read_temp, }; static int psy_register_thermal(struct power_supply *psy) { int i; /* Register battery zone device psy reports temperature */ for (i = 0; i < psy->num_properties; i++) { if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) { psy->tzd = thermal_zone_device_register(psy->name, 0, 0, psy, &psy_tzd_ops, NULL, 0, 0); if (IS_ERR(psy->tzd)) return PTR_ERR(psy->tzd); break; } } return 0; } static void psy_unregister_thermal(struct power_supply *psy) { if (IS_ERR_OR_NULL(psy->tzd)) return; thermal_zone_device_unregister(psy->tzd); } /* thermal cooling device callbacks */ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd, unsigned long *state) { struct power_supply *psy; union power_supply_propval val; int ret; psy = tcd->devdata; ret = psy->get_property(psy, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val); if (!ret) *state = val.intval; return ret; } static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd, unsigned long *state) { struct power_supply *psy; union power_supply_propval val; int ret; psy = tcd->devdata; ret = psy->get_property(psy, 
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val); if (!ret) *state = val.intval; return ret; } static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd, unsigned long state) { struct power_supply *psy; union power_supply_propval val; int ret; psy = tcd->devdata; val.intval = state; ret = psy->set_property(psy, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val); return ret; } static struct thermal_cooling_device_ops psy_tcd_ops = { .get_max_state = ps_get_max_charge_cntl_limit, .get_cur_state = ps_get_cur_chrage_cntl_limit, .set_cur_state = ps_set_cur_charge_cntl_limit, }; static int psy_register_cooler(struct power_supply *psy) { int i; /* Register for cooling device if psy can control charging */ for (i = 0; i < psy->num_properties; i++) { if (psy->properties[i] == POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT) { psy->tcd = thermal_cooling_device_register( (char *)psy->name, psy, &psy_tcd_ops); if (IS_ERR(psy->tcd)) return PTR_ERR(psy->tcd); break; } } return 0; } static void psy_unregister_cooler(struct power_supply *psy) { if (IS_ERR_OR_NULL(psy->tcd)) return; thermal_cooling_device_unregister(psy->tcd); } #else static int psy_register_thermal(struct power_supply *psy) { return 0; } static void psy_unregister_thermal(struct power_supply *psy) { } static int psy_register_cooler(struct power_supply *psy) { return 0; } static void psy_unregister_cooler(struct power_supply *psy) { } #endif int power_supply_register(struct device *parent, struct power_supply *psy) { struct device *dev; int rc; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; device_initialize(dev); dev->class = power_supply_class; dev->type = &power_supply_dev_type; dev->parent = parent; dev->release = power_supply_dev_release; dev_set_drvdata(dev, psy); psy->dev = dev; INIT_WORK(&psy->changed_work, power_supply_changed_work); rc = kobject_set_name(&dev->kobj, "%s", psy->name); if (rc) goto kobject_set_name_failed; rc = device_add(dev); if (rc) goto device_add_failed; rc = 
psy_register_thermal(psy); if (rc) goto register_thermal_failed; rc = psy_register_cooler(psy); if (rc) goto register_cooler_failed; rc = power_supply_create_triggers(psy); if (rc) goto create_triggers_failed; power_supply_changed(psy); goto success; create_triggers_failed: psy_unregister_cooler(psy); register_cooler_failed: psy_unregister_thermal(psy); register_thermal_failed: device_del(dev); kobject_set_name_failed: device_add_failed: put_device(dev); success: return rc; } EXPORT_SYMBOL_GPL(power_supply_register); void power_supply_unregister(struct power_supply *psy) { cancel_work_sync(&psy->changed_work); sysfs_remove_link(&psy->dev->kobj, "powers"); power_supply_remove_triggers(psy); psy_unregister_cooler(psy); psy_unregister_thermal(psy); device_unregister(psy->dev); } EXPORT_SYMBOL_GPL(power_supply_unregister); static int __init power_supply_class_init(void) { power_supply_class = class_create(THIS_MODULE, "power_supply"); if (IS_ERR(power_supply_class)) return PTR_ERR(power_supply_class); power_supply_class->dev_uevent = power_supply_uevent; power_supply_init_attrs(&power_supply_dev_type); return 0; } static void __exit power_supply_class_exit(void) { class_destroy(power_supply_class); } subsys_initcall(power_supply_class_init); module_exit(power_supply_class_exit); MODULE_DESCRIPTION("Universal power supply monitor class"); MODULE_AUTHOR("Ian Molton <spyro@f2s.com>, " "Szabolcs Gyurko, " "Anton Vorontsov <cbou@mail.ru>"); MODULE_LICENSE("GPL");
gpl-2.0
tempesta-tech/linux-4.1-tfw
drivers/net/ethernet/amd/au1000_eth.c
824
37949
/* * * Alchemy Au1x00 ethernet driver * * Copyright 2001-2003, 2006 MontaVista Software Inc. * Copyright 2002 TimeSys Corp. * Added ethtool/mii-tool support, * Copyright 2004 Matt Porter <mporter@kernel.crashing.org> * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de * or riemer@riemer-nt.de: fixed the link beat detection with * ioctls (SIOCGMIIPHY) * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org> * converted to use linux-2.6.x's PHY framework * * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. 
* * ######################################################################## * * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/ioport.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <linux/io.h> #include <asm/mipsregs.h> #include <asm/irq.h> #include <asm/processor.h> #include <au1000.h> #include <au1xxx_eth.h> #include <prom.h> #include "au1000_eth.h" #ifdef AU1000_ETH_DEBUG static int au1000_debug = 5; #else static int au1000_debug = 3; #endif #define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK) #define DRV_NAME "au1000_eth" #define DRV_VERSION "1.7" #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>" #define DRV_DESC "Au1xxx on-chip Ethernet driver" MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /* AU1000 MAC registers and bits */ #define MAC_CONTROL 0x0 # define MAC_RX_ENABLE (1 << 2) # define MAC_TX_ENABLE (1 << 3) # define MAC_DEF_CHECK (1 << 5) # define MAC_SET_BL(X) (((X) & 0x3) << 6) # define MAC_AUTO_PAD (1 << 8) # define MAC_DISABLE_RETRY (1 << 10) # define MAC_DISABLE_BCAST (1 << 11) # define MAC_LATE_COL (1 << 12) # define MAC_HASH_MODE (1 << 13) # define MAC_HASH_ONLY (1 << 15) # define MAC_PASS_ALL (1 << 16) # define MAC_INVERSE_FILTER (1 << 17) # define MAC_PROMISCUOUS (1 << 18) # define MAC_PASS_ALL_MULTI (1 << 19) # define MAC_FULL_DUPLEX (1 << 20) # define MAC_NORMAL_MODE 0 # define MAC_INT_LOOPBACK (1 << 21) # define 
MAC_EXT_LOOPBACK (1 << 22) # define MAC_DISABLE_RX_OWN (1 << 23) # define MAC_BIG_ENDIAN (1 << 30) # define MAC_RX_ALL (1 << 31) #define MAC_ADDRESS_HIGH 0x4 #define MAC_ADDRESS_LOW 0x8 #define MAC_MCAST_HIGH 0xC #define MAC_MCAST_LOW 0x10 #define MAC_MII_CNTRL 0x14 # define MAC_MII_BUSY (1 << 0) # define MAC_MII_READ 0 # define MAC_MII_WRITE (1 << 1) # define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6) # define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11) #define MAC_MII_DATA 0x18 #define MAC_FLOW_CNTRL 0x1C # define MAC_FLOW_CNTRL_BUSY (1 << 0) # define MAC_FLOW_CNTRL_ENABLE (1 << 1) # define MAC_PASS_CONTROL (1 << 2) # define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16) #define MAC_VLAN1_TAG 0x20 #define MAC_VLAN2_TAG 0x24 /* Ethernet Controller Enable */ # define MAC_EN_CLOCK_ENABLE (1 << 0) # define MAC_EN_RESET0 (1 << 1) # define MAC_EN_TOSS (0 << 2) # define MAC_EN_CACHEABLE (1 << 3) # define MAC_EN_RESET1 (1 << 4) # define MAC_EN_RESET2 (1 << 5) # define MAC_DMA_RESET (1 << 6) /* Ethernet Controller DMA Channels */ /* offsets from MAC_TX_RING_ADDR address */ #define MAC_TX_BUFF0_STATUS 0x0 # define TX_FRAME_ABORTED (1 << 0) # define TX_JAB_TIMEOUT (1 << 1) # define TX_NO_CARRIER (1 << 2) # define TX_LOSS_CARRIER (1 << 3) # define TX_EXC_DEF (1 << 4) # define TX_LATE_COLL_ABORT (1 << 5) # define TX_EXC_COLL (1 << 6) # define TX_UNDERRUN (1 << 7) # define TX_DEFERRED (1 << 8) # define TX_LATE_COLL (1 << 9) # define TX_COLL_CNT_MASK (0xF << 10) # define TX_PKT_RETRY (1 << 31) #define MAC_TX_BUFF0_ADDR 0x4 # define TX_DMA_ENABLE (1 << 0) # define TX_T_DONE (1 << 1) # define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) #define MAC_TX_BUFF0_LEN 0x8 #define MAC_TX_BUFF1_STATUS 0x10 #define MAC_TX_BUFF1_ADDR 0x14 #define MAC_TX_BUFF1_LEN 0x18 #define MAC_TX_BUFF2_STATUS 0x20 #define MAC_TX_BUFF2_ADDR 0x24 #define MAC_TX_BUFF2_LEN 0x28 #define MAC_TX_BUFF3_STATUS 0x30 #define MAC_TX_BUFF3_ADDR 0x34 #define MAC_TX_BUFF3_LEN 0x38 /* offsets from MAC_RX_RING_ADDR */ #define 
MAC_RX_BUFF0_STATUS 0x0 # define RX_FRAME_LEN_MASK 0x3fff # define RX_WDOG_TIMER (1 << 14) # define RX_RUNT (1 << 15) # define RX_OVERLEN (1 << 16) # define RX_COLL (1 << 17) # define RX_ETHER (1 << 18) # define RX_MII_ERROR (1 << 19) # define RX_DRIBBLING (1 << 20) # define RX_CRC_ERROR (1 << 21) # define RX_VLAN1 (1 << 22) # define RX_VLAN2 (1 << 23) # define RX_LEN_ERROR (1 << 24) # define RX_CNTRL_FRAME (1 << 25) # define RX_U_CNTRL_FRAME (1 << 26) # define RX_MCAST_FRAME (1 << 27) # define RX_BCAST_FRAME (1 << 28) # define RX_FILTER_FAIL (1 << 29) # define RX_PACKET_FILTER (1 << 30) # define RX_MISSED_FRAME (1 << 31) # define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \ RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \ RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME) #define MAC_RX_BUFF0_ADDR 0x4 # define RX_DMA_ENABLE (1 << 0) # define RX_T_DONE (1 << 1) # define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) # define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0) #define MAC_RX_BUFF1_STATUS 0x10 #define MAC_RX_BUFF1_ADDR 0x14 #define MAC_RX_BUFF2_STATUS 0x20 #define MAC_RX_BUFF2_ADDR 0x24 #define MAC_RX_BUFF3_STATUS 0x30 #define MAC_RX_BUFF3_ADDR 0x34 /* * Theory of operation * * The Au1000 MACs use a simple rx and tx descriptor ring scheme. * There are four receive and four transmit descriptors. These * descriptors are not in memory; rather, they are just a set of * hardware registers. * * Since the Au1000 has a coherent data cache, the receive and * transmit buffers are allocated from the KSEG0 segment. The * hardware registers, however, are still mapped at KSEG1 to * make sure there's no out-of-order writes, and that all writes * complete immediately. 
*/ /* * board-specific configurations * * PHY detection algorithm * * If phy_static_config is undefined, the PHY setup is * autodetected: * * mii_probe() first searches the current MAC's MII bus for a PHY, * selecting the first (or last, if phy_search_highest_addr is * defined) PHY address not already claimed by another netdev. * * If nothing was found that way when searching for the 2nd ethernet * controller's PHY and phy1_search_mac0 is defined, then * the first MII bus is searched as well for an unclaimed PHY; this is * needed in case of a dual-PHY accessible only through the MAC0's MII * bus. * * Finally, if no PHY is found, then the corresponding ethernet * controller is not registered to the network subsystem. */ /* autodetection defaults: phy1_search_mac0 */ /* static PHY setup * * most boards PHY setup should be detectable properly with the * autodetection algorithm in mii_probe(), but in some cases (e.g. if * you have a switch attached, or want to use the PHY's interrupt * notification capabilities) you can provide a static PHY * configuration here * * IRQs may only be set, if a PHY address was configured * If a PHY address is given, also a bus id is required to be set * * ps: make sure the used irqs are configured properly in the board * specific irq-map */ static void au1000_enable_mac(struct net_device *dev, int force_reset) { unsigned long flags; struct au1000_private *aup = netdev_priv(dev); spin_lock_irqsave(&aup->lock, flags); if (force_reset || (!aup->mac_enabled)) { writel(MAC_EN_CLOCK_ENABLE, aup->enable); wmb(); /* drain writebuffer */ mdelay(2); writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE), aup->enable); wmb(); /* drain writebuffer */ mdelay(2); aup->mac_enabled = 1; } spin_unlock_irqrestore(&aup->lock, flags); } /* * MII operations */ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) { struct au1000_private *aup = netdev_priv(dev); u32 *const mii_control_reg = &aup->mac->mii_control; u32 
*const mii_data_reg = &aup->mac->mii_data; u32 timedout = 20; u32 mii_control; while (readl(mii_control_reg) & MAC_MII_BUSY) { mdelay(1); if (--timedout == 0) { netdev_err(dev, "read_MII busy timeout!!\n"); return -1; } } mii_control = MAC_SET_MII_SELECT_REG(reg) | MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ; writel(mii_control, mii_control_reg); timedout = 20; while (readl(mii_control_reg) & MAC_MII_BUSY) { mdelay(1); if (--timedout == 0) { netdev_err(dev, "mdio_read busy timeout!!\n"); return -1; } } return readl(mii_data_reg); } static void au1000_mdio_write(struct net_device *dev, int phy_addr, int reg, u16 value) { struct au1000_private *aup = netdev_priv(dev); u32 *const mii_control_reg = &aup->mac->mii_control; u32 *const mii_data_reg = &aup->mac->mii_data; u32 timedout = 20; u32 mii_control; while (readl(mii_control_reg) & MAC_MII_BUSY) { mdelay(1); if (--timedout == 0) { netdev_err(dev, "mdio_write busy timeout!!\n"); return; } } mii_control = MAC_SET_MII_SELECT_REG(reg) | MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE; writel(value, mii_data_reg); writel(mii_control, mii_control_reg); } static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does * _NOT_ hold (e.g. 
when PHY is accessed through other MAC's MII bus) */ struct net_device *const dev = bus->priv; /* make sure the MAC associated with this * mii_bus is enabled */ au1000_enable_mac(dev, 0); return au1000_mdio_read(dev, phy_addr, regnum); } static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) { struct net_device *const dev = bus->priv; /* make sure the MAC associated with this * mii_bus is enabled */ au1000_enable_mac(dev, 0); au1000_mdio_write(dev, phy_addr, regnum, value); return 0; } static int au1000_mdiobus_reset(struct mii_bus *bus) { struct net_device *const dev = bus->priv; /* make sure the MAC associated with this * mii_bus is enabled */ au1000_enable_mac(dev, 0); return 0; } static void au1000_hard_stop(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); u32 reg; netif_dbg(aup, drv, dev, "hard stop\n"); reg = readl(&aup->mac->control); reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); writel(reg, &aup->mac->control); wmb(); /* drain writebuffer */ mdelay(10); } static void au1000_enable_rx_tx(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); u32 reg; netif_dbg(aup, hw, dev, "enable_rx_tx\n"); reg = readl(&aup->mac->control); reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE); writel(reg, &aup->mac->control); wmb(); /* drain writebuffer */ mdelay(10); } static void au1000_adjust_link(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); struct phy_device *phydev = aup->phy_dev; unsigned long flags; u32 reg; int status_change = 0; BUG_ON(!aup->phy_dev); spin_lock_irqsave(&aup->lock, flags); if (phydev->link && (aup->old_speed != phydev->speed)) { /* speed changed */ switch (phydev->speed) { case SPEED_10: case SPEED_100: break; default: netdev_warn(dev, "Speed (%d) is not 10/100 ???\n", phydev->speed); break; } aup->old_speed = phydev->speed; status_change = 1; } if (phydev->link && (aup->old_duplex != phydev->duplex)) { /* duplex mode changed */ /* switching duplex mode requires to 
disable rx and tx! */ au1000_hard_stop(dev); reg = readl(&aup->mac->control); if (DUPLEX_FULL == phydev->duplex) { reg |= MAC_FULL_DUPLEX; reg &= ~MAC_DISABLE_RX_OWN; } else { reg &= ~MAC_FULL_DUPLEX; reg |= MAC_DISABLE_RX_OWN; } writel(reg, &aup->mac->control); wmb(); /* drain writebuffer */ mdelay(1); au1000_enable_rx_tx(dev); aup->old_duplex = phydev->duplex; status_change = 1; } if (phydev->link != aup->old_link) { /* link state changed */ if (!phydev->link) { /* link went down */ aup->old_speed = 0; aup->old_duplex = -1; } aup->old_link = phydev->link; status_change = 1; } spin_unlock_irqrestore(&aup->lock, flags); if (status_change) { if (phydev->link) netdev_info(dev, "link up (%d/%s)\n", phydev->speed, DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); else netdev_info(dev, "link down\n"); } } static int au1000_mii_probe(struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); struct phy_device *phydev = NULL; int phy_addr; if (aup->phy_static_config) { BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); if (aup->phy_addr) phydev = aup->mii_bus->phy_map[aup->phy_addr]; else netdev_info(dev, "using PHY-less setup\n"); return 0; } /* find the first (lowest address) PHY * on the current MAC's MII bus */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) if (aup->mii_bus->phy_map[phy_addr]) { phydev = aup->mii_bus->phy_map[phy_addr]; if (!aup->phy_search_highest_addr) /* break out with first one found */ break; } if (aup->phy1_search_mac0) { /* try harder to find a PHY */ if (!phydev && (aup->mac_id == 1)) { /* no PHY found, maybe we have a dual PHY? */ dev_info(&dev->dev, ": no PHY found on MAC1, " "let's see if it's attached to MAC0...\n"); /* find the first (lowest address) non-attached * PHY on the MAC0 MII bus */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { struct phy_device *const tmp_phydev = aup->mii_bus->phy_map[phy_addr]; if (aup->mac_id == 1) break; /* no PHY here... 
*/ if (!tmp_phydev) continue; /* already claimed by MAC0 */ if (tmp_phydev->attached_dev) continue; phydev = tmp_phydev; break; /* found it */ } } } if (!phydev) { netdev_err(dev, "no PHY found\n"); return -1; } /* now we are supposed to have a proper phydev, to attach to... */ BUG_ON(phydev->attached_dev); phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phydev)) { netdev_err(dev, "Could not attach to PHY\n"); return PTR_ERR(phydev); } /* mask with MAC supported features */ phydev->supported &= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ | SUPPORTED_MII | SUPPORTED_TP); phydev->advertising = phydev->supported; aup->old_link = 0; aup->old_speed = 0; aup->old_duplex = -1; aup->phy_dev = phydev; netdev_info(dev, "attached PHY driver [%s] " "(mii_bus:phy_addr=%s, irq=%d)\n", phydev->drv->name, dev_name(&phydev->dev), phydev->irq); return 0; } /* * Buffer allocation/deallocation routines. The buffer descriptor returned * has the virtual and dma address of a buffer suitable for * both, receive and transmit operations. 
*/ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup) { struct db_dest *pDB; pDB = aup->pDBfree; if (pDB) aup->pDBfree = pDB->pnext; return pDB; } void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) { struct db_dest *pDBfree = aup->pDBfree; if (pDBfree) pDBfree->pnext = pDB; aup->pDBfree = pDB; } static void au1000_reset_mac_unlocked(struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); int i; au1000_hard_stop(dev); writel(MAC_EN_CLOCK_ENABLE, aup->enable); wmb(); /* drain writebuffer */ mdelay(2); writel(0, aup->enable); wmb(); /* drain writebuffer */ mdelay(2); aup->tx_full = 0; for (i = 0; i < NUM_RX_DMA; i++) { /* reset control bits */ aup->rx_dma_ring[i]->buff_stat &= ~0xf; } for (i = 0; i < NUM_TX_DMA; i++) { /* reset control bits */ aup->tx_dma_ring[i]->buff_stat &= ~0xf; } aup->mac_enabled = 0; } static void au1000_reset_mac(struct net_device *dev) { struct au1000_private *const aup = netdev_priv(dev); unsigned long flags; netif_dbg(aup, hw, dev, "reset mac, aup %x\n", (unsigned)aup); spin_lock_irqsave(&aup->lock, flags); au1000_reset_mac_unlocked(dev); spin_unlock_irqrestore(&aup->lock, flags); } /* * Setup the receive and transmit "rings". These pointers are the addresses * of the rx and tx MAC DMA registers so they are fixed by the hardware -- * these are not descriptors sitting in memory. 
*/ static void au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base) { int i; for (i = 0; i < NUM_RX_DMA; i++) { aup->rx_dma_ring[i] = (struct rx_dma *) (tx_base + 0x100 + sizeof(struct rx_dma) * i); } for (i = 0; i < NUM_TX_DMA; i++) { aup->tx_dma_ring[i] = (struct tx_dma *) (tx_base + sizeof(struct tx_dma) * i); } } /* * ethtool operations */ static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct au1000_private *aup = netdev_priv(dev); if (aup->phy_dev) return phy_ethtool_gset(aup->phy_dev, cmd); return -EINVAL; } static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct au1000_private *aup = netdev_priv(dev); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (aup->phy_dev) return phy_ethtool_sset(aup->phy_dev, cmd); return -EINVAL; } static void au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct au1000_private *aup = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME, aup->mac_id); info->regdump_len = 0; } static void au1000_set_msglevel(struct net_device *dev, u32 value) { struct au1000_private *aup = netdev_priv(dev); aup->msg_enable = value; } static u32 au1000_get_msglevel(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); return aup->msg_enable; } static const struct ethtool_ops au1000_ethtool_ops = { .get_settings = au1000_get_settings, .set_settings = au1000_set_settings, .get_drvinfo = au1000_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = au1000_get_msglevel, .set_msglevel = au1000_set_msglevel, }; /* * Initialize the interface. * * When the device powers up, the clocks are disabled and the * mac is in reset state. When the interface is closed, we * do the same -- reset the device and disable the clocks to * conserve power. 
Thus, whenever au1000_init() is called, * the device should already be in reset state. */ static int au1000_init(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); unsigned long flags; int i; u32 control; netif_dbg(aup, hw, dev, "au1000_init\n"); /* bring the device out of reset */ au1000_enable_mac(dev, 1); spin_lock_irqsave(&aup->lock, flags); writel(0, &aup->mac->control); aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; aup->tx_tail = aup->tx_head; aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2; writel(dev->dev_addr[5]<<8 | dev->dev_addr[4], &aup->mac->mac_addr_high); writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 | dev->dev_addr[1]<<8 | dev->dev_addr[0], &aup->mac->mac_addr_low); for (i = 0; i < NUM_RX_DMA; i++) aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; wmb(); /* drain writebuffer */ control = MAC_RX_ENABLE | MAC_TX_ENABLE; #ifndef CONFIG_CPU_LITTLE_ENDIAN control |= MAC_BIG_ENDIAN; #endif if (aup->phy_dev) { if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex)) control |= MAC_FULL_DUPLEX; else control |= MAC_DISABLE_RX_OWN; } else { /* PHY-less op, assume full-duplex */ control |= MAC_FULL_DUPLEX; } writel(control, &aup->mac->control); writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */ wmb(); /* drain writebuffer */ spin_unlock_irqrestore(&aup->lock, flags); return 0; } static inline void au1000_update_rx_stats(struct net_device *dev, u32 status) { struct net_device_stats *ps = &dev->stats; ps->rx_packets++; if (status & RX_MCAST_FRAME) ps->multicast++; if (status & RX_ERROR) { ps->rx_errors++; if (status & RX_MISSED_FRAME) ps->rx_missed_errors++; if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) ps->rx_length_errors++; if (status & RX_CRC_ERROR) ps->rx_crc_errors++; if (status & RX_COLL) ps->collisions++; } else ps->rx_bytes += status & RX_FRAME_LEN_MASK; } /* * Au1000 receive routine. 
*/ static int au1000_rx(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); struct sk_buff *skb; struct rx_dma *prxd; u32 buff_stat, status; struct db_dest *pDB; u32 frmlen; netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; while (buff_stat & RX_T_DONE) { status = prxd->status; pDB = aup->rx_db_inuse[aup->rx_head]; au1000_update_rx_stats(dev, status); if (!(status & RX_ERROR)) { /* good frame */ frmlen = (status & RX_FRAME_LEN_MASK); frmlen -= 4; /* Remove FCS */ skb = netdev_alloc_skb(dev, frmlen + 2); if (skb == NULL) { dev->stats.rx_dropped++; continue; } skb_reserve(skb, 2); /* 16 byte IP header align */ skb_copy_to_linear_data(skb, (unsigned char *)pDB->vaddr, frmlen); skb_put(skb, frmlen); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); /* pass the packet to upper layers */ } else { if (au1000_debug > 4) { pr_err("rx_error(s):"); if (status & RX_MISSED_FRAME) pr_cont(" miss"); if (status & RX_WDOG_TIMER) pr_cont(" wdog"); if (status & RX_RUNT) pr_cont(" runt"); if (status & RX_OVERLEN) pr_cont(" overlen"); if (status & RX_COLL) pr_cont(" coll"); if (status & RX_MII_ERROR) pr_cont(" mii error"); if (status & RX_CRC_ERROR) pr_cont(" crc error"); if (status & RX_LEN_ERROR) pr_cont(" len error"); if (status & RX_U_CNTRL_FRAME) pr_cont(" u control frame"); pr_cont("\n"); } } prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); wmb(); /* drain writebuffer */ /* next descriptor */ prxd = aup->rx_dma_ring[aup->rx_head]; buff_stat = prxd->buff_stat; } return 0; } static void au1000_update_tx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = netdev_priv(dev); struct net_device_stats *ps = &dev->stats; if (status & TX_FRAME_ABORTED) { if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { /* any other tx errors are only 
valid * in half duplex mode */ ps->tx_errors++; ps->tx_aborted_errors++; } } else { ps->tx_errors++; ps->tx_aborted_errors++; if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) ps->tx_carrier_errors++; } } } /* * Called from the interrupt service routine to acknowledge * the TX DONE bits. This is a must if the irq is setup as * edge triggered. */ static void au1000_tx_ack(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); struct tx_dma *ptxd; ptxd = aup->tx_dma_ring[aup->tx_tail]; while (ptxd->buff_stat & TX_T_DONE) { au1000_update_tx_stats(dev, ptxd->status); ptxd->buff_stat &= ~TX_T_DONE; ptxd->len = 0; wmb(); /* drain writebuffer */ aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); ptxd = aup->tx_dma_ring[aup->tx_tail]; if (aup->tx_full) { aup->tx_full = 0; netif_wake_queue(dev); } } } /* * Au1000 interrupt service routine. */ static irqreturn_t au1000_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; /* Handle RX interrupts first to minimize chance of overrun */ au1000_rx(dev); au1000_tx_ack(dev); return IRQ_RETVAL(1); } static int au1000_open(struct net_device *dev) { int retval; struct au1000_private *aup = netdev_priv(dev); netif_dbg(aup, drv, dev, "open: dev=%p\n", dev); retval = request_irq(dev->irq, au1000_interrupt, 0, dev->name, dev); if (retval) { netdev_err(dev, "unable to get IRQ %d\n", dev->irq); return retval; } retval = au1000_init(dev); if (retval) { netdev_err(dev, "error in au1000_init\n"); free_irq(dev->irq, dev); return retval; } if (aup->phy_dev) { /* cause the PHY state machine to schedule a link state check */ aup->phy_dev->state = PHY_CHANGELINK; phy_start(aup->phy_dev); } netif_start_queue(dev); netif_dbg(aup, drv, dev, "open: Initialization done.\n"); return 0; } static int au1000_close(struct net_device *dev) { unsigned long flags; struct au1000_private *const aup = netdev_priv(dev); netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); if (aup->phy_dev) phy_stop(aup->phy_dev); 
spin_lock_irqsave(&aup->lock, flags); au1000_reset_mac_unlocked(dev); /* stop the device */ netif_stop_queue(dev); /* disable the interrupt */ free_irq(dev->irq, dev); spin_unlock_irqrestore(&aup->lock, flags); return 0; } /* * Au1000 transmit routine. */ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); struct net_device_stats *ps = &dev->stats; struct tx_dma *ptxd; u32 buff_stat; struct db_dest *pDB; int i; netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", (unsigned)aup, skb->len, skb->data, aup->tx_head); ptxd = aup->tx_dma_ring[aup->tx_head]; buff_stat = ptxd->buff_stat; if (buff_stat & TX_DMA_ENABLE) { /* We've wrapped around and the transmitter is still busy */ netif_stop_queue(dev); aup->tx_full = 1; return NETDEV_TX_BUSY; } else if (buff_stat & TX_T_DONE) { au1000_update_tx_stats(dev, ptxd->status); ptxd->len = 0; } if (aup->tx_full) { aup->tx_full = 0; netif_wake_queue(dev); } pDB = aup->tx_db_inuse[aup->tx_head]; skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); if (skb->len < ETH_ZLEN) { for (i = skb->len; i < ETH_ZLEN; i++) ((char *)pDB->vaddr)[i] = 0; ptxd->len = ETH_ZLEN; } else ptxd->len = skb->len; ps->tx_packets++; ps->tx_bytes += ptxd->len; ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; wmb(); /* drain writebuffer */ dev_kfree_skb(skb); aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); return NETDEV_TX_OK; } /* * The Tx ring has been full longer than the watchdog timeout * value. The transmitter must be hung? 
*/ static void au1000_tx_timeout(struct net_device *dev) { netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); au1000_reset_mac(dev); au1000_init(dev); dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } static void au1000_multicast_list(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); u32 reg; netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags); reg = readl(&aup->mac->control); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ reg |= MAC_PROMISCUOUS; } else if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { reg |= MAC_PASS_ALL_MULTI; reg &= ~MAC_PROMISCUOUS; netdev_info(dev, "Pass all multicast\n"); } else { struct netdev_hw_addr *ha; u32 mc_filter[2]; /* Multicast hash filter */ mc_filter[1] = mc_filter[0] = 0; netdev_for_each_mc_addr(ha, dev) set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, (long *)mc_filter); writel(mc_filter[1], &aup->mac->multi_hash_high); writel(mc_filter[0], &aup->mac->multi_hash_low); reg &= ~MAC_PROMISCUOUS; reg |= MAC_HASH_MODE; } writel(reg, &aup->mac->control); } static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct au1000_private *aup = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; if (!aup->phy_dev) return -EINVAL; /* PHY not controllable */ return phy_mii_ioctl(aup->phy_dev, rq, cmd); } static const struct net_device_ops au1000_netdev_ops = { .ndo_open = au1000_open, .ndo_stop = au1000_close, .ndo_start_xmit = au1000_tx, .ndo_set_rx_mode = au1000_multicast_list, .ndo_do_ioctl = au1000_ioctl, .ndo_tx_timeout = au1000_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static int au1000_probe(struct platform_device *pdev) { struct au1000_private *aup = NULL; struct au1000_eth_platform_data *pd; struct net_device *dev = NULL; struct db_dest *pDB, *pDBfree; int irq, i, err = 0; struct resource *base, *macen, *macdma; base = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!base) { dev_err(&pdev->dev, "failed to retrieve base register\n"); err = -ENODEV; goto out; } macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!macen) { dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n"); err = -ENODEV; goto out; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to retrieve IRQ\n"); err = -ENODEV; goto out; } macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!macdma) { dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n"); err = -ENODEV; goto out; } if (!request_mem_region(base->start, resource_size(base), pdev->name)) { dev_err(&pdev->dev, "failed to request memory region for base registers\n"); err = -ENXIO; goto out; } if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) { dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); err = -ENXIO; goto err_request; } if (!request_mem_region(macdma->start, resource_size(macdma), pdev->name)) { dev_err(&pdev->dev, "failed to request MACDMA memory region\n"); err = -ENXIO; goto err_macdma; } dev = alloc_etherdev(sizeof(struct au1000_private)); if (!dev) { err = -ENOMEM; goto err_alloc; } SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); aup = netdev_priv(dev); spin_lock_init(&aup->lock); aup->msg_enable = (au1000_debug < 4 ? 
AU1000_DEF_MSG_ENABLE : au1000_debug); /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), &aup->dma_addr, 0); if (!aup->vaddr) { dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; goto err_vaddr; } /* aup->mac is the base address of the MAC's registers */ aup->mac = (struct mac_reg *) ioremap_nocache(base->start, resource_size(base)); if (!aup->mac) { dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); err = -ENXIO; goto err_remap1; } /* Setup some variables for quick register address access */ aup->enable = (u32 *)ioremap_nocache(macen->start, resource_size(macen)); if (!aup->enable) { dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); err = -ENXIO; goto err_remap2; } aup->mac_id = pdev->id; aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); if (!aup->macdma) { dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n"); err = -ENXIO; goto err_remap3; } au1000_setup_hw_rings(aup, aup->macdma); writel(0, aup->enable); aup->mac_enabled = 0; pd = dev_get_platdata(&pdev->dev); if (!pd) { dev_info(&pdev->dev, "no platform_data passed," " PHY search on MAC0\n"); aup->phy1_search_mac0 = 1; } else { if (is_valid_ether_addr(pd->mac)) { memcpy(dev->dev_addr, pd->mac, ETH_ALEN); } else { /* Set a random MAC since no valid provided by platform_data. 
*/ eth_hw_addr_random(dev); } aup->phy_static_config = pd->phy_static_config; aup->phy_search_highest_addr = pd->phy_search_highest_addr; aup->phy1_search_mac0 = pd->phy1_search_mac0; aup->phy_addr = pd->phy_addr; aup->phy_busid = pd->phy_busid; aup->phy_irq = pd->phy_irq; } if (aup->phy_busid && aup->phy_busid > 0) { dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n"); err = -ENODEV; goto err_mdiobus_alloc; } aup->mii_bus = mdiobus_alloc(); if (aup->mii_bus == NULL) { dev_err(&pdev->dev, "failed to allocate mdiobus structure\n"); err = -ENOMEM; goto err_mdiobus_alloc; } aup->mii_bus->priv = dev; aup->mii_bus->read = au1000_mdiobus_read; aup->mii_bus->write = au1000_mdiobus_write; aup->mii_bus->reset = au1000_mdiobus_reset; aup->mii_bus->name = "au1000_eth_mii"; snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, aup->mac_id); aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (aup->mii_bus->irq == NULL) { err = -ENOMEM; goto err_out; } for (i = 0; i < PHY_MAX_ADDR; ++i) aup->mii_bus->irq[i] = PHY_POLL; /* if known, set corresponding PHY IRQs */ if (aup->phy_static_config) if (aup->phy_irq && aup->phy_busid == aup->mac_id) aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq; err = mdiobus_register(aup->mii_bus); if (err) { dev_err(&pdev->dev, "failed to register MDIO bus\n"); goto err_mdiobus_reg; } err = au1000_mii_probe(dev); if (err != 0) goto err_out; pDBfree = NULL; /* setup the data buffer descriptors and attach a buffer to each one */ pDB = aup->db; for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { pDB->pnext = pDBfree; pDBfree = pDB; pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); pDB++; } aup->pDBfree = pDBfree; err = -ENODEV; for (i = 0; i < NUM_RX_DMA; i++) { pDB = au1000_GetFreeDB(aup); if (!pDB) goto err_out; aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; aup->rx_db_inuse[i] = pDB; } err = -ENODEV; for (i = 0; 
i < NUM_TX_DMA; i++) { pDB = au1000_GetFreeDB(aup); if (!pDB) goto err_out; aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; aup->tx_dma_ring[i]->len = 0; aup->tx_db_inuse[i] = pDB; } dev->base_addr = base->start; dev->irq = irq; dev->netdev_ops = &au1000_netdev_ops; dev->ethtool_ops = &au1000_ethtool_ops; dev->watchdog_timeo = ETH_TX_TIMEOUT; /* * The boot code uses the ethernet controller, so reset it to start * fresh. au1000_init() expects that the device is in reset state. */ au1000_reset_mac(dev); err = register_netdev(dev); if (err) { netdev_err(dev, "Cannot register net device, aborting.\n"); goto err_out; } netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", (unsigned long)base->start, irq); pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); return 0; err_out: if (aup->mii_bus != NULL) mdiobus_unregister(aup->mii_bus); /* here we should have a valid dev plus aup-> register addresses * so we can reset the mac properly. */ au1000_reset_mac(dev); for (i = 0; i < NUM_RX_DMA; i++) { if (aup->rx_db_inuse[i]) au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); } for (i = 0; i < NUM_TX_DMA; i++) { if (aup->tx_db_inuse[i]) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); } err_mdiobus_reg: mdiobus_free(aup->mii_bus); err_mdiobus_alloc: iounmap(aup->macdma); err_remap3: iounmap(aup->enable); err_remap2: iounmap(aup->mac); err_remap1: dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), (void *)aup->vaddr, aup->dma_addr); err_vaddr: free_netdev(dev); err_alloc: release_mem_region(macdma->start, resource_size(macdma)); err_macdma: release_mem_region(macen->start, resource_size(macen)); err_request: release_mem_region(base->start, resource_size(base)); out: return err; } static int au1000_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct au1000_private *aup = netdev_priv(dev); int i; struct resource *base, *macen; unregister_netdev(dev); mdiobus_unregister(aup->mii_bus); 
mdiobus_free(aup->mii_bus); for (i = 0; i < NUM_RX_DMA; i++) if (aup->rx_db_inuse[i]) au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); for (i = 0; i < NUM_TX_DMA; i++) if (aup->tx_db_inuse[i]) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), (void *)aup->vaddr, aup->dma_addr); iounmap(aup->macdma); iounmap(aup->mac); iounmap(aup->enable); base = platform_get_resource(pdev, IORESOURCE_MEM, 2); release_mem_region(base->start, resource_size(base)); base = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(base->start, resource_size(base)); macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); release_mem_region(macen->start, resource_size(macen)); free_netdev(dev); return 0; } static struct platform_driver au1000_eth_driver = { .probe = au1000_probe, .remove = au1000_remove, .driver = { .name = "au1000-eth", }, }; module_platform_driver(au1000_eth_driver); MODULE_ALIAS("platform:au1000-eth");
gpl-2.0
jdkernel/jdkernel_vigor_2.6.35
net/lapb/lapb_timer.c
5176
4566
/*
 *	LAPB release 002
 *
 *	This code REQUIRES 2.1.15 or higher/ NET3.038
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	LAPB 001	Jonathan Naylor	Started Coding
 *	LAPB 002	Jonathan Naylor	New timer architecture.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/lapb.h>

static void lapb_t1timer_expiry(unsigned long);
static void lapb_t2timer_expiry(unsigned long);

/*
 * (Re)start the T1 retransmission timer.  Any pending run is cancelled
 * first, then the timer is armed for 'lapb->t1' jiffies from now.
 */
void lapb_start_t1timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t1timer);

	lapb->t1timer.data     = (unsigned long)lapb;
	lapb->t1timer.function = &lapb_t1timer_expiry;
	lapb->t1timer.expires  = jiffies + lapb->t1;

	add_timer(&lapb->t1timer);
}

/*
 * (Re)start the T2 acknowledgement-delay timer for 'lapb->t2' jiffies.
 */
void lapb_start_t2timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t2timer);

	lapb->t2timer.data     = (unsigned long)lapb;
	lapb->t2timer.function = &lapb_t2timer_expiry;
	lapb->t2timer.expires  = jiffies + lapb->t2;

	add_timer(&lapb->t2timer);
}

/* Cancel a pending T1 timer, if any. */
void lapb_stop_t1timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t1timer);
}

/* Cancel a pending T2 timer, if any. */
void lapb_stop_t2timer(struct lapb_cb *lapb)
{
	del_timer(&lapb->t2timer);
}

/* Return non-zero if the T1 timer is currently armed. */
int lapb_t1timer_running(struct lapb_cb *lapb)
{
	return timer_pending(&lapb->t1timer);
}

/*
 * T2 expiry: if an acknowledgement is still pending, clear the condition
 * and send the delayed (timeout) response now.
 */
static void lapb_t2timer_expiry(unsigned long param)
{
	struct lapb_cb *lapb = (struct lapb_cb *)param;

	if (lapb->condition & LAPB_ACK_PENDING_CONDITION) {
		lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
		lapb_timeout_response(lapb);
	}
}

/*
 * T1 expiry: per-state retransmission handling.  Each connection-phase
 * state retries its pending frame up to N2 times; once the retry budget
 * is exhausted the link is torn down (state 0) and the upper layer is
 * notified.  Unless we bail out via 'return' on that give-up path, the
 * timer is re-armed at the bottom so retries keep firing.
 */
static void lapb_t1timer_expiry(unsigned long param)
{
	struct lapb_cb *lapb = (struct lapb_cb *)param;

	switch (lapb->state) {

		/*
		 *	If we are a DCE, keep going DM .. DM .. DM
		 */
		case LAPB_STATE_0:
			if (lapb->mode & LAPB_DCE)
				lapb_send_control(lapb, LAPB_DM, LAPB_POLLOFF, LAPB_RESPONSE);
			break;

		/*
		 *	Awaiting connection state, send SABM(E), up to N2 times.
		 */
		case LAPB_STATE_1:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
#endif
				return;
			} else {
				lapb->n2count++;
				/* SABME in extended (modulo-128) mode, SABM otherwise. */
				if (lapb->mode & LAPB_EXTENDED) {
#if LAPB_DEBUG > 1
					printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev);
#endif
					lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
				} else {
#if LAPB_DEBUG > 1
					printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev);
#endif
					lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
				}
			}
			break;

		/*
		 *	Awaiting disconnection state, send DISC, up to N2 times.
		 */
		case LAPB_STATE_2:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
#endif
				return;
			} else {
				lapb->n2count++;
#if LAPB_DEBUG > 1
				printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev);
#endif
				lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
			}
			break;

		/*
		 *	Data transfer state, restransmit I frames, up to N2 times.
		 */
		case LAPB_STATE_3:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_stop_t2timer(lapb);
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
#endif
				return;
			} else {
				lapb->n2count++;
				/* Put unacked I frames back on the write queue for resend. */
				lapb_requeue_frames(lapb);
			}
			break;

		/*
		 *	Frame reject state, restransmit FRMR frames, up to N2 times.
		 */
		case LAPB_STATE_4:
			if (lapb->n2count == lapb->n2) {
				lapb_clear_queues(lapb);
				lapb->state = LAPB_STATE_0;
				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
#if LAPB_DEBUG > 0
				printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev);
#endif
				return;
			} else {
				lapb->n2count++;
				lapb_transmit_frmr(lapb);
			}
			break;
	}

	lapb_start_t1timer(lapb);
}
gpl-2.0
Nicklas373/Hana-Kernel_MSM8627-AOSP_7.0
drivers/input/touchscreen/hp680_ts_input.c
9784
2949
/*
 * HP Jornada touchscreen driver (registers as "HP Jornada touchscreen").
 *
 * Pen-down is detected by polling a bit in the PHDR port data register;
 * panel coordinates are read through the SuperH ADC (adc_single()),
 * with the scan axis selected via bits in the SCPDR register.
 */
#include <linux/input.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/adc.h>
#include <mach/hp6xx.h>

#define MODNAME "hp680_ts_input"

/* Calibrated usable range of the raw ADC readings for each axis. */
#define HP680_TS_ABS_X_MIN	40
#define HP680_TS_ABS_X_MAX	950
#define HP680_TS_ABS_Y_MIN	80
#define HP680_TS_ABS_Y_MAX	910

/* Port data register addresses (memory-mapped I/O). */
#define	PHDR	0xa400012e
#define SCPDR	0xa4000136

static void do_softint(struct work_struct *work);

static struct input_dev *hp680_ts_dev;
static DECLARE_DELAYED_WORK(work, do_softint);

/*
 * Deferred-work half of the pen interrupt: if the pen is (still) down,
 * scan the Y then X axis through the ADC, report the position, and
 * re-enable the pen IRQ that the hard interrupt handler disabled.
 * The udelay()s let the analogue lines settle before each conversion.
 */
static void do_softint(struct work_struct *work)
{
	int absx = 0, absy = 0;
	u8 scpdr;
	int touched = 0;

	if (__raw_readb(PHDR) & PHDR_TS_PEN_DOWN) {
		/* Select and sample the Y axis. */
		scpdr = __raw_readb(SCPDR);
		scpdr |= SCPDR_TS_SCAN_ENABLE;
		scpdr &= ~SCPDR_TS_SCAN_Y;
		__raw_writeb(scpdr, SCPDR);
		udelay(30);

		absy = adc_single(ADC_CHANNEL_TS_Y);

		/* Switch to the X axis and sample it. */
		scpdr = __raw_readb(SCPDR);
		scpdr |= SCPDR_TS_SCAN_Y;
		scpdr &= ~SCPDR_TS_SCAN_X;
		__raw_writeb(scpdr, SCPDR);
		udelay(30);

		absx = adc_single(ADC_CHANNEL_TS_X);

		/* Scan done: disable scanning again. */
		scpdr = __raw_readb(SCPDR);
		scpdr |= SCPDR_TS_SCAN_X;
		scpdr &= ~SCPDR_TS_SCAN_ENABLE;
		__raw_writeb(scpdr, SCPDR);
		udelay(100);

		/* Re-check pen state after the scan; reject if it lifted. */
		touched = __raw_readb(PHDR) & PHDR_TS_PEN_DOWN;
	}

	if (touched) {
		input_report_key(hp680_ts_dev, BTN_TOUCH, 1);
		input_report_abs(hp680_ts_dev, ABS_X, absx);
		input_report_abs(hp680_ts_dev, ABS_Y, absy);
	} else {
		input_report_key(hp680_ts_dev, BTN_TOUCH, 0);
	}

	input_sync(hp680_ts_dev);
	enable_irq(HP680_TS_IRQ);
}

/*
 * Hard IRQ handler: mask the pen interrupt (re-enabled by do_softint())
 * and schedule the ADC scan 1/20th of a second later to debounce.
 */
static irqreturn_t hp680_ts_interrupt(int irq, void *dev)
{
	disable_irq_nosync(irq);
	schedule_delayed_work(&work, HZ / 20);

	return IRQ_HANDLED;
}

/*
 * Module init: allocate and configure the input device, claim the pen
 * IRQ, then register with the input core.  On failure the resources
 * acquired so far are unwound in reverse order.
 */
static int __init hp680_ts_init(void)
{
	int err;

	hp680_ts_dev = input_allocate_device();
	if (!hp680_ts_dev)
		return -ENOMEM;

	hp680_ts_dev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
	hp680_ts_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

	input_set_abs_params(hp680_ts_dev, ABS_X,
		HP680_TS_ABS_X_MIN, HP680_TS_ABS_X_MAX, 0, 0);
	input_set_abs_params(hp680_ts_dev, ABS_Y,
		HP680_TS_ABS_Y_MIN, HP680_TS_ABS_Y_MAX, 0, 0);

	hp680_ts_dev->name = "HP Jornada touchscreen";
	hp680_ts_dev->phys = "hp680_ts/input0";

	if (request_irq(HP680_TS_IRQ, hp680_ts_interrupt,
			0, MODNAME, NULL) < 0) {
		printk(KERN_ERR "hp680_touchscreen.c: Can't allocate irq %d\n",
		       HP680_TS_IRQ);
		err = -EBUSY;
		goto fail1;
	}

	err = input_register_device(hp680_ts_dev);
	if (err)
		goto fail2;

	return 0;

 fail2:	free_irq(HP680_TS_IRQ, NULL);
	cancel_delayed_work_sync(&work);
 fail1:	input_free_device(hp680_ts_dev);
	return err;
}

/* Module exit: release the IRQ, flush pending work, unregister device. */
static void __exit hp680_ts_exit(void)
{
	free_irq(HP680_TS_IRQ, NULL);
	cancel_delayed_work_sync(&work);
	input_unregister_device(hp680_ts_dev);
}

module_init(hp680_ts_init);
module_exit(hp680_ts_exit);

MODULE_AUTHOR("Andriy Skulysh, askulysh@image.kiev.ua");
MODULE_DESCRIPTION("HP Jornada 680 touchscreen driver");
MODULE_LICENSE("GPL");
gpl-2.0
yamahata/linux-umem
drivers/misc/sgi-gru/gruhandles.c
13368
5536
/* * GRU KERNEL MCS INSTRUCTIONS * * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include "gru.h" #include "grulib.h" #include "grutables.h" /* 10 sec */ #ifdef CONFIG_IA64 #include <asm/processor.h> #define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10) #define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq) #else #include <asm/tsc.h> #define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) #define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz) #endif /* Extract the status field from a kernel handle */ #define GET_MSEG_HANDLE_STATUS(h) (((*(unsigned long *)(h)) >> 16) & 3) struct mcs_op_statistic mcs_op_statistics[mcsop_last]; static void update_mcs_stats(enum mcs_op op, unsigned long clks) { unsigned long nsec; nsec = CLKS2NSEC(clks); atomic_long_inc(&mcs_op_statistics[op].count); atomic_long_add(nsec, &mcs_op_statistics[op].total); if (mcs_op_statistics[op].max < nsec) mcs_op_statistics[op].max = nsec; } static void start_instruction(void *h) { unsigned long *w0 = h; wmb(); /* setting CMD/STATUS bits must be last */ *w0 = *w0 | 0x20001; gru_flush_cache(h); } static void report_instruction_timeout(void *h) { unsigned long goff = GSEGPOFF((unsigned long)h); char *id = "???"; if (TYPE_IS(CCH, goff)) id = "CCH"; 
else if (TYPE_IS(TGH, goff)) id = "TGH"; else if (TYPE_IS(TFH, goff)) id = "TFH"; panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id); } static int wait_instruction_complete(void *h, enum mcs_op opc) { int status; unsigned long start_time = get_cycles(); while (1) { cpu_relax(); status = GET_MSEG_HANDLE_STATUS(h); if (status != CCHSTATUS_ACTIVE) break; if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) { report_instruction_timeout(h); start_time = get_cycles(); } } if (gru_options & OPT_STATS) update_mcs_stats(opc, get_cycles() - start_time); return status; } int cch_allocate(struct gru_context_configuration_handle *cch) { int ret; cch->opc = CCHOP_ALLOCATE; start_instruction(cch); ret = wait_instruction_complete(cch, cchop_allocate); /* * Stop speculation into the GSEG being mapped by the previous ALLOCATE. * The GSEG memory does not exist until the ALLOCATE completes. */ sync_core(); return ret; } int cch_start(struct gru_context_configuration_handle *cch) { cch->opc = CCHOP_START; start_instruction(cch); return wait_instruction_complete(cch, cchop_start); } int cch_interrupt(struct gru_context_configuration_handle *cch) { cch->opc = CCHOP_INTERRUPT; start_instruction(cch); return wait_instruction_complete(cch, cchop_interrupt); } int cch_deallocate(struct gru_context_configuration_handle *cch) { int ret; cch->opc = CCHOP_DEALLOCATE; start_instruction(cch); ret = wait_instruction_complete(cch, cchop_deallocate); /* * Stop speculation into the GSEG being unmapped by the previous * DEALLOCATE. 
*/ sync_core(); return ret; } int cch_interrupt_sync(struct gru_context_configuration_handle *cch) { cch->opc = CCHOP_INTERRUPT_SYNC; start_instruction(cch); return wait_instruction_complete(cch, cchop_interrupt_sync); } int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr, unsigned long vaddrmask, int asid, int pagesize, int global, int n, unsigned short ctxbitmap) { tgh->vaddr = vaddr; tgh->asid = asid; tgh->pagesize = pagesize; tgh->n = n; tgh->global = global; tgh->vaddrmask = vaddrmask; tgh->ctxbitmap = ctxbitmap; tgh->opc = TGHOP_TLBINV; start_instruction(tgh); return wait_instruction_complete(tgh, tghop_invalidate); } int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr, int gaa, unsigned long vaddr, int asid, int dirty, int pagesize) { tfh->fillasid = asid; tfh->fillvaddr = vaddr; tfh->pfn = paddr >> GRU_PADDR_SHIFT; tfh->gaa = gaa; tfh->dirty = dirty; tfh->pagesize = pagesize; tfh->opc = TFHOP_WRITE_ONLY; start_instruction(tfh); return wait_instruction_complete(tfh, tfhop_write_only); } void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr, int gaa, unsigned long vaddr, int asid, int dirty, int pagesize) { tfh->fillasid = asid; tfh->fillvaddr = vaddr; tfh->pfn = paddr >> GRU_PADDR_SHIFT; tfh->gaa = gaa; tfh->dirty = dirty; tfh->pagesize = pagesize; tfh->opc = TFHOP_WRITE_RESTART; start_instruction(tfh); } void tfh_restart(struct gru_tlb_fault_handle *tfh) { tfh->opc = TFHOP_RESTART; start_instruction(tfh); } void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh) { tfh->opc = TFHOP_USER_POLLING_MODE; start_instruction(tfh); } void tfh_exception(struct gru_tlb_fault_handle *tfh) { tfh->opc = TFHOP_EXCEPTION; start_instruction(tfh); }
gpl-2.0
careyli/linux-3.16.2
drivers/staging/wlan-ng/hfa384x_usb.c
57
110907
/* src/prism2/driver/hfa384x_usb.c * * Functions that talk to the USB variantof the Intersil hfa384x MAC * * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. * -------------------------------------------------------------------- * * linux-wlan * * The contents of this file are subject to the Mozilla Public * License Version 1.1 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU Public License version 2 (the "GPL"), in which * case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use * your version of this file under the MPL, indicate your decision * by deleting the provisions above and replace them with the notice * and other provisions required by the GPL. If you do not delete * the provisions above, a recipient may use your version of this * file under either the MPL or the GPL. * * -------------------------------------------------------------------- * * Inquiries regarding the linux-wlan Open Source project can be * made directly to: * * AbsoluteValue Systems Inc. * info@linux-wlan.com * http://www.linux-wlan.com * * -------------------------------------------------------------------- * * Portions of the development of this software were funded by * Intersil Corporation as part of PRISM(R) chipset product development. 
* * -------------------------------------------------------------------- * * This file implements functions that correspond to the prism2/hfa384x * 802.11 MAC hardware and firmware host interface. * * The functions can be considered to represent several levels of * abstraction. The lowest level functions are simply C-callable wrappers * around the register accesses. The next higher level represents C-callable * prism2 API functions that match the Intersil documentation as closely * as is reasonable. The next higher layer implements common sequences * of invocations of the API layer (e.g. write to bap, followed by cmd). * * Common sequences: * hfa384x_drvr_xxx Highest level abstractions provided by the * hfa384x code. They are driver defined wrappers * for common sequences. These functions generally * use the services of the lower levels. * * hfa384x_drvr_xxxconfig An example of the drvr level abstraction. These * functions are wrappers for the RID get/set * sequence. They call copy_[to|from]_bap() and * cmd_access(). These functions operate on the * RIDs and buffers without validation. The caller * is responsible for that. * * API wrapper functions: * hfa384x_cmd_xxx functions that provide access to the f/w commands. * The function arguments correspond to each command * argument, even command arguments that get packed * into single registers. These functions _just_ * issue the command by setting the cmd/parm regs * & reading the status/resp regs. Additional * activities required to fully use a command * (read/write from/to bap, get/set int status etc.) * are implemented separately. Think of these as * C-callable prism2 commands. * * Lowest Layer Functions: * hfa384x_docmd_xxx These functions implement the sequence required * to issue any prism2 command. Primarily used by the * hfa384x_cmd_xxx functions. * * hfa384x_bap_xxx BAP read/write access functions. * Note: we usually use BAP0 for non-interrupt context * and BAP1 for interrupt context. 
* * hfa384x_dl_xxx download related functions. * * Driver State Issues: * Note that there are two pairs of functions that manage the * 'initialized' and 'running' states of the hw/MAC combo. The four * functions are create(), destroy(), start(), and stop(). create() * sets up the data structures required to support the hfa384x_* * functions and destroy() cleans them up. The start() function gets * the actual hardware running and enables the interrupts. The stop() * function shuts the hardware down. The sequence should be: * create() * start() * . * . Do interesting things w/ the hardware * . * stop() * destroy() * * Note that destroy() can be called without calling stop() first. * -------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/wireless.h> #include <linux/netdevice.h> #include <linux/timer.h> #include <linux/io.h> #include <linux/delay.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/list.h> #include <linux/usb.h> #include <linux/byteorder/generic.h> #define SUBMIT_URB(u, f) usb_submit_urb(u, f) #include "p80211types.h" #include "p80211hdr.h" #include "p80211mgmt.h" #include "p80211conv.h" #include "p80211msg.h" #include "p80211netdev.h" #include "p80211req.h" #include "p80211metadef.h" #include "p80211metastruct.h" #include "hfa384x.h" #include "prism2mgmt.h" enum cmd_mode { DOWAIT = 0, DOASYNC }; #define THROTTLE_JIFFIES (HZ/8) #define URB_ASYNC_UNLINK 0 #define USB_QUEUE_BULK 0 #define ROUNDUP64(a) (((a)+63)&~63) #ifdef DEBUG_USB static void dbprint_urb(struct urb *urb); #endif static void hfa384x_int_rxmonitor(wlandevice_t *wlandev, hfa384x_usb_rxfrm_t *rxfrm); static void hfa384x_usb_defer(struct work_struct *data); static int submit_rx_urb(hfa384x_t *hw, gfp_t flags); static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t flags); 
/*---------------------------------------------------*/ /* Callbacks */ static void hfa384x_usbout_callback(struct urb *urb); static void hfa384x_ctlxout_callback(struct urb *urb); static void hfa384x_usbin_callback(struct urb *urb); static void hfa384x_usbin_txcompl(wlandevice_t *wlandev, hfa384x_usbin_t *usbin); static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb); static void hfa384x_usbin_info(wlandevice_t *wlandev, hfa384x_usbin_t *usbin); static void hfa384x_usbout_tx(wlandevice_t *wlandev, hfa384x_usbout_t *usbout); static void hfa384x_usbin_ctlx(hfa384x_t *hw, hfa384x_usbin_t *usbin, int urb_status); /*---------------------------------------------------*/ /* Functions to support the prism2 usb command queue */ static void hfa384x_usbctlxq_run(hfa384x_t *hw); static void hfa384x_usbctlx_reqtimerfn(unsigned long data); static void hfa384x_usbctlx_resptimerfn(unsigned long data); static void hfa384x_usb_throttlefn(unsigned long data); static void hfa384x_usbctlx_completion_task(unsigned long data); static void hfa384x_usbctlx_reaper_task(unsigned long data); static int hfa384x_usbctlx_submit(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx); static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx); struct usbctlx_completor { int (*complete)(struct usbctlx_completor *); }; static int hfa384x_usbctlx_complete_sync(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx, struct usbctlx_completor *completor); static int unlocked_usbctlx_cancel_async(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx); static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx); static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx); static int usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp, hfa384x_cmdresult_t *result); static void usbctlx_get_rridresult(const hfa384x_usb_rridresp_t *rridresp, hfa384x_rridresult_t *result); /*---------------------------------------------------*/ /* Low level req/resp CTLX formatters and submitters */ 
static int hfa384x_docmd(hfa384x_t *hw, enum cmd_mode mode, hfa384x_metacmd_t *cmd, ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data); static int hfa384x_dorrid(hfa384x_t *hw, enum cmd_mode mode, u16 rid, void *riddata, unsigned int riddatalen, ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data); static int hfa384x_dowrid(hfa384x_t *hw, enum cmd_mode mode, u16 rid, void *riddata, unsigned int riddatalen, ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data); static int hfa384x_dormem(hfa384x_t *hw, enum cmd_mode mode, u16 page, u16 offset, void *data, unsigned int len, ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data); static int hfa384x_dowmem(hfa384x_t *hw, enum cmd_mode mode, u16 page, u16 offset, void *data, unsigned int len, ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data); static int hfa384x_isgood_pdrcode(u16 pdrcode); static inline const char *ctlxstr(CTLX_STATE s) { static const char * const ctlx_str[] = { "Initial state", "Complete", "Request failed", "Request pending", "Request packet submitted", "Request packet completed", "Response packet completed" }; return ctlx_str[s]; }; static inline hfa384x_usbctlx_t *get_active_ctlx(hfa384x_t *hw) { return list_entry(hw->ctlxq.active.next, hfa384x_usbctlx_t, list); } #ifdef DEBUG_USB void dbprint_urb(struct urb *urb) { pr_debug("urb->pipe=0x%08x\n", urb->pipe); pr_debug("urb->status=0x%08x\n", urb->status); pr_debug("urb->transfer_flags=0x%08x\n", urb->transfer_flags); pr_debug("urb->transfer_buffer=0x%08x\n", (unsigned int)urb->transfer_buffer); pr_debug("urb->transfer_buffer_length=0x%08x\n", urb->transfer_buffer_length); pr_debug("urb->actual_length=0x%08x\n", urb->actual_length); pr_debug("urb->bandwidth=0x%08x\n", urb->bandwidth); pr_debug("urb->setup_packet(ctl)=0x%08x\n", (unsigned int)urb->setup_packet); pr_debug("urb->start_frame(iso/irq)=0x%08x\n", urb->start_frame); pr_debug("urb->interval(irq)=0x%08x\n", urb->interval); 
	pr_debug("urb->error_count(iso)=0x%08x\n", urb->error_count);
	pr_debug("urb->timeout=0x%08x\n", urb->timeout);
	pr_debug("urb->context=0x%08x\n", (unsigned int)urb->context);
	pr_debug("urb->complete=0x%08x\n", (unsigned int)urb->complete);
}
#endif

/*----------------------------------------------------------------
 * submit_rx_urb
 *
 * Listen for input data on the BULK-IN pipe. If the pipe has
 * stalled then schedule it to be reset.
 *
 * Arguments:
 *	hw		device struct
 *	memflags	memory allocation flags
 *
 * Returns:
 *	error code from submission
 *
 * Call context:
 *	Any
 ----------------------------------------------------------------*/
static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags)
{
	struct sk_buff *skb;
	int result;

	/* One skb sized for a whole IN transfer backs the rx urb */
	skb = dev_alloc_skb(sizeof(hfa384x_usbin_t));
	if (skb == NULL) {
		result = -ENOMEM;
		goto done;
	}

	/* Post the IN urb */
	usb_fill_bulk_urb(&hw->rx_urb,
			  hw->usb,
			  hw->endp_in,
			  skb->data, sizeof(hfa384x_usbin_t),
			  hfa384x_usbin_callback, hw->wlandev);

	hw->rx_urb_skb = skb;

	/* Only submit while the device is present and the RX pipe
	 * is not already halted awaiting a reset.
	 */
	result = -ENOLINK;
	if (!hw->wlandev->hwremoved &&
	    !test_bit(WORK_RX_HALT, &hw->usb_flags)) {
		result = SUBMIT_URB(&hw->rx_urb, memflags);

		/* Check whether we need to reset the RX pipe */
		if (result == -EPIPE) {
			netdev_warn(hw->wlandev->netdev,
				    "%s rx pipe stalled: requesting reset\n",
				    hw->wlandev->netdev->name);
			if (!test_and_set_bit(WORK_RX_HALT, &hw->usb_flags))
				schedule_work(&hw->usb_work);
		}
	}

	/* Don't leak memory if anything should go wrong */
	if (result != 0) {
		dev_kfree_skb(skb);
		hw->rx_urb_skb = NULL;
	}

done:
	return result;
}

/*----------------------------------------------------------------
 * submit_tx_urb
 *
 * Prepares and submits the URB of transmitted data. If the
 * submission fails then it will schedule the output pipe to
 * be reset.
 *
 * Arguments:
 *	hw		device struct
 *	tx_urb		URB of data for transmission
 *	memflags	memory allocation flags
 *
 * Returns:
 *	error code from submission
 *
 * Call context:
 *	Any
 ----------------------------------------------------------------*/
static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t memflags)
{
	struct net_device *netdev = hw->wlandev->netdev;
	int result;

	result = -ENOLINK;
	if (netif_running(netdev)) {
		/* Skip submission if the plug was pulled or the TX pipe
		 * is halted awaiting a reset.
		 */
		if (!hw->wlandev->hwremoved &&
		    !test_bit(WORK_TX_HALT, &hw->usb_flags)) {
			result = SUBMIT_URB(tx_urb, memflags);

			/* Test whether we need to reset the TX pipe */
			if (result == -EPIPE) {
				netdev_warn(hw->wlandev->netdev,
					    "%s tx pipe stalled: requesting reset\n",
					    netdev->name);
				set_bit(WORK_TX_HALT, &hw->usb_flags);
				schedule_work(&hw->usb_work);
			} else if (result == 0) {
				/* Only one TX in flight: stop the queue
				 * until this urb completes.
				 */
				netif_stop_queue(netdev);
			}
		}
	}

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_usb_defer
 *
 * There are some things that the USB stack cannot do while
 * in interrupt context, so we arrange this function to run
 * in process context.
 *
 * Arguments:
 *	hw	device structure
 *
 * Returns:
 *	nothing
 *
 * Call context:
 *	process (by design)
 ----------------------------------------------------------------*/
static void hfa384x_usb_defer(struct work_struct *data)
{
	hfa384x_t *hw = container_of(data, struct hfa384x, usb_work);
	struct net_device *netdev = hw->wlandev->netdev;

	/* Don't bother trying to reset anything if the plug
	 * has been pulled ...
	 */
	if (hw->wlandev->hwremoved)
		return;

	/* Reception has stopped: try to reset the input pipe */
	if (test_bit(WORK_RX_HALT, &hw->usb_flags)) {
		int ret;

		usb_kill_urb(&hw->rx_urb);	/* Cannot be holding spinlock! */

		ret = usb_clear_halt(hw->usb, hw->endp_in);
		if (ret != 0) {
			netdev_err(hw->wlandev->netdev,
				   "Failed to clear rx pipe for %s: err=%d\n",
				   netdev->name, ret);
		} else {
			netdev_info(hw->wlandev->netdev,
				    "%s rx pipe reset complete.\n",
				    netdev->name);
			/* Clear HALT before setting RESUME so the resume
			 * branch below re-posts the rx urb this same run.
			 */
			clear_bit(WORK_RX_HALT, &hw->usb_flags);
			set_bit(WORK_RX_RESUME, &hw->usb_flags);
		}
	}

	/* Resume receiving data back from the device. */
	if (test_bit(WORK_RX_RESUME, &hw->usb_flags)) {
		int ret;

		ret = submit_rx_urb(hw, GFP_KERNEL);
		if (ret != 0) {
			netdev_err(hw->wlandev->netdev,
				   "Failed to resume %s rx pipe.\n",
				   netdev->name);
		} else {
			clear_bit(WORK_RX_RESUME, &hw->usb_flags);
		}
	}

	/* Transmission has stopped: try to reset the output pipe */
	if (test_bit(WORK_TX_HALT, &hw->usb_flags)) {
		int ret;

		usb_kill_urb(&hw->tx_urb);
		ret = usb_clear_halt(hw->usb, hw->endp_out);
		if (ret != 0) {
			netdev_err(hw->wlandev->netdev,
				   "Failed to clear tx pipe for %s: err=%d\n",
				   netdev->name, ret);
		} else {
			netdev_info(hw->wlandev->netdev,
				    "%s tx pipe reset complete.\n",
				    netdev->name);
			clear_bit(WORK_TX_HALT, &hw->usb_flags);
			set_bit(WORK_TX_RESUME, &hw->usb_flags);

			/* Stopping the BULK-OUT pipe also blocked
			 * us from sending any more CTLX URBs, so
			 * we need to re-run our queue ...
			 */
			hfa384x_usbctlxq_run(hw);
		}
	}

	/* Resume transmitting. */
	if (test_and_clear_bit(WORK_TX_RESUME, &hw->usb_flags))
		netif_wake_queue(hw->wlandev->netdev);
}

/*----------------------------------------------------------------
 * hfa384x_create
 *
 * Sets up the hfa384x_t data structure for use.  Note this
 * does _not_ initialize the actual hardware, just the data structures
 * we use to keep track of its state.
* * Arguments: * hw device structure * irq device irq number * iobase i/o base address for register access * membase memory base address for register access * * Returns: * nothing * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ void hfa384x_create(hfa384x_t *hw, struct usb_device *usb) { memset(hw, 0, sizeof(hfa384x_t)); hw->usb = usb; /* set up the endpoints */ hw->endp_in = usb_rcvbulkpipe(usb, 1); hw->endp_out = usb_sndbulkpipe(usb, 2); /* Set up the waitq */ init_waitqueue_head(&hw->cmdq); /* Initialize the command queue */ spin_lock_init(&hw->ctlxq.lock); INIT_LIST_HEAD(&hw->ctlxq.pending); INIT_LIST_HEAD(&hw->ctlxq.active); INIT_LIST_HEAD(&hw->ctlxq.completing); INIT_LIST_HEAD(&hw->ctlxq.reapable); /* Initialize the authentication queue */ skb_queue_head_init(&hw->authq); tasklet_init(&hw->reaper_bh, hfa384x_usbctlx_reaper_task, (unsigned long)hw); tasklet_init(&hw->completion_bh, hfa384x_usbctlx_completion_task, (unsigned long)hw); INIT_WORK(&hw->link_bh, prism2sta_processing_defer); INIT_WORK(&hw->usb_work, hfa384x_usb_defer); init_timer(&hw->throttle); hw->throttle.function = hfa384x_usb_throttlefn; hw->throttle.data = (unsigned long)hw; init_timer(&hw->resptimer); hw->resptimer.function = hfa384x_usbctlx_resptimerfn; hw->resptimer.data = (unsigned long)hw; init_timer(&hw->reqtimer); hw->reqtimer.function = hfa384x_usbctlx_reqtimerfn; hw->reqtimer.data = (unsigned long)hw; usb_init_urb(&hw->rx_urb); usb_init_urb(&hw->tx_urb); usb_init_urb(&hw->ctlx_urb); hw->link_status = HFA384x_LINK_NOTCONNECTED; hw->state = HFA384x_STATE_INIT; INIT_WORK(&hw->commsqual_bh, prism2sta_commsqual_defer); init_timer(&hw->commsqual_timer); hw->commsqual_timer.data = (unsigned long)hw; hw->commsqual_timer.function = prism2sta_commsqual_timer; } /*---------------------------------------------------------------- * hfa384x_destroy * * Partner to hfa384x_create(). 
This function cleans up the hw * structure so that it can be freed by the caller using a simple * kfree. Currently, this function is just a placeholder. If, at some * point in the future, an hw in the 'shutdown' state requires a 'deep' * kfree, this is where it should be done. Note that if this function * is called on a _running_ hw structure, the drvr_stop() function is * called. * * Arguments: * hw device structure * * Returns: * nothing, this function is not allowed to fail. * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ void hfa384x_destroy(hfa384x_t *hw) { struct sk_buff *skb; if (hw->state == HFA384x_STATE_RUNNING) hfa384x_drvr_stop(hw); hw->state = HFA384x_STATE_PREINIT; kfree(hw->scanresults); hw->scanresults = NULL; /* Now to clean out the auth queue */ while ((skb = skb_dequeue(&hw->authq))) dev_kfree_skb(skb); } static hfa384x_usbctlx_t *usbctlx_alloc(void) { hfa384x_usbctlx_t *ctlx; ctlx = kmalloc(sizeof(*ctlx), in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); if (ctlx != NULL) { memset(ctlx, 0, sizeof(*ctlx)); init_completion(&ctlx->done); } return ctlx; } static int usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp, hfa384x_cmdresult_t *result) { result->status = le16_to_cpu(cmdresp->status); result->resp0 = le16_to_cpu(cmdresp->resp0); result->resp1 = le16_to_cpu(cmdresp->resp1); result->resp2 = le16_to_cpu(cmdresp->resp2); pr_debug("cmdresult:status=0x%04x resp0=0x%04x resp1=0x%04x resp2=0x%04x\n", result->status, result->resp0, result->resp1, result->resp2); return result->status & HFA384x_STATUS_RESULT; } static void usbctlx_get_rridresult(const hfa384x_usb_rridresp_t *rridresp, hfa384x_rridresult_t *result) { result->rid = le16_to_cpu(rridresp->rid); result->riddata = rridresp->data; result->riddata_len = ((le16_to_cpu(rridresp->frmlen) - 1) * 2); } /*---------------------------------------------------------------- * Completor object: * This completor must be passed to hfa384x_usbctlx_complete_sync() * when processing a CTLX that returns a hfa384x_cmdresult_t structure. 
 ----------------------------------------------------------------*/
struct usbctlx_cmd_completor {
	struct usbctlx_completor head;

	const hfa384x_usb_cmdresp_t *cmdresp;	/* wire-format response */
	hfa384x_cmdresult_t *result;		/* host-order output */
};

static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
{
	struct usbctlx_cmd_completor *complete;

	complete = (struct usbctlx_cmd_completor *)head;
	return usbctlx_get_status(complete->cmdresp, complete->result);
}

static inline struct usbctlx_completor *init_cmd_completor(
						struct usbctlx_cmd_completor
						*completor,
						const hfa384x_usb_cmdresp_t
						*cmdresp,
						hfa384x_cmdresult_t *result)
{
	completor->head.complete = usbctlx_cmd_completor_fn;
	completor->cmdresp = cmdresp;
	completor->result = result;
	return &(completor->head);
}

/*----------------------------------------------------------------
 * Completor object:
 * This completor must be passed to hfa384x_usbctlx_complete_sync()
 * when processing a CTLX that reads a RID.
 ----------------------------------------------------------------*/
struct usbctlx_rrid_completor {
	struct usbctlx_completor head;

	const hfa384x_usb_rridresp_t *rridresp;	/* wire-format response */
	void *riddata;				/* caller's buffer */
	unsigned int riddatalen;		/* expected length, bytes */
};

static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
{
	struct usbctlx_rrid_completor *complete;
	hfa384x_rridresult_t rridresult;

	complete = (struct usbctlx_rrid_completor *)head;
	usbctlx_get_rridresult(complete->rridresp, &rridresult);

	/* Validate the length, note body len calculation in bytes */
	if (rridresult.riddata_len != complete->riddatalen) {
		pr_warn("RID len mismatch, rid=0x%04x hlen=%d fwlen=%d\n",
			rridresult.rid, complete->riddatalen,
			rridresult.riddata_len);
		return -ENODATA;
	}

	memcpy(complete->riddata, rridresult.riddata, complete->riddatalen);
	return 0;
}

static inline struct usbctlx_completor *init_rrid_completor(
						struct usbctlx_rrid_completor
						*completor,
						const hfa384x_usb_rridresp_t
						*rridresp,
						void *riddata,
						unsigned int riddatalen)
{
	completor->head.complete = usbctlx_rrid_completor_fn;
	completor->rridresp = rridresp;
	completor->riddata = riddata;
	completor->riddatalen = riddatalen;
	return &(completor->head);
}

/*----------------------------------------------------------------
 * Completor object:
 * Interprets the results of a synchronous RID-write
 * (a write response carries only a cmdresult, so reuse it)
 ----------------------------------------------------------------*/
#define init_wrid_completor  init_cmd_completor

/*----------------------------------------------------------------
 * Completor object:
 * Interprets the results of a synchronous memory-write
 ----------------------------------------------------------------*/
#define init_wmem_completor  init_cmd_completor

/*----------------------------------------------------------------
 * Completor object:
 * Interprets the results of a synchronous memory-read
 ----------------------------------------------------------------*/
struct usbctlx_rmem_completor {
	struct usbctlx_completor head;

	const hfa384x_usb_rmemresp_t *rmemresp;	/* wire-format response */
	void *data;				/* caller's buffer */
	unsigned int len;			/* bytes to copy */
};

static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
{
	struct usbctlx_rmem_completor *complete =
	    (struct usbctlx_rmem_completor *)head;

	pr_debug("rmemresp:len=%d\n", complete->rmemresp->frmlen);
	memcpy(complete->data, complete->rmemresp->data, complete->len);
	return 0;
}

static inline struct usbctlx_completor *init_rmem_completor(
						struct usbctlx_rmem_completor
						*completor,
						hfa384x_usb_rmemresp_t
						*rmemresp,
						void *data,
						unsigned int len)
{
	completor->head.complete = usbctlx_rmem_completor_fn;
	completor->rmemresp = rmemresp;
	completor->data = data;
	completor->len = len;
	return &(completor->head);
}

/*----------------------------------------------------------------
 * hfa384x_cb_status
 *
 * Ctlx_complete handler for async CMD type control exchanges.
 * mark the hw struct as such.
 *
 * Note: If the handling is changed here, it should probably be
 *       changed in docmd as well.
 *
 * Arguments:
 *	hw	hw struct
 *	ctlx	completed CTLX
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
 ----------------------------------------------------------------*/
static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
{
	if (ctlx->usercb != NULL) {
		hfa384x_cmdresult_t cmdresult;

		if (ctlx->state != CTLX_COMPLETE) {
			/* Synthesize an error result for a failed exchange */
			memset(&cmdresult, 0, sizeof(cmdresult));
			cmdresult.status =
			    HFA384x_STATUS_RESULT_SET(HFA384x_CMD_ERR);
		} else {
			usbctlx_get_status(&ctlx->inbuf.cmdresp, &cmdresult);
		}

		ctlx->usercb(hw, &cmdresult, ctlx->usercb_data);
	}
}

/*----------------------------------------------------------------
 * hfa384x_cb_rrid
 *
 * CTLX completion handler for async RRID type control exchanges.
 *
 * Note: If the handling is changed here, it should probably be
 *       changed in dorrid as well.
 *
 * Arguments:
 *	hw	hw struct
 *	ctlx	completed CTLX
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	interrupt
 ----------------------------------------------------------------*/
static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
{
	if (ctlx->usercb != NULL) {
		hfa384x_rridresult_t rridresult;

		if (ctlx->state != CTLX_COMPLETE) {
			/* On failure, report back only the requested RID */
			memset(&rridresult, 0, sizeof(rridresult));
			rridresult.rid = le16_to_cpu(ctlx->outbuf.rridreq.rid);
		} else {
			usbctlx_get_rridresult(&ctlx->inbuf.rridresp,
					       &rridresult);
		}

		ctlx->usercb(hw, &rridresult, ctlx->usercb_data);
	}
}

/* Thin convenience wrappers: each pins the DOWAIT/DOASYNC mode of the
 * corresponding hfa384x_do*() primitive.
 */
static inline int hfa384x_docmd_wait(hfa384x_t *hw, hfa384x_metacmd_t *cmd)
{
	return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
}

static inline int
hfa384x_docmd_async(hfa384x_t *hw,
		    hfa384x_metacmd_t *cmd,
		    ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
	return hfa384x_docmd(hw, DOASYNC, cmd, cmdcb, usercb, usercb_data);
}

static inline int
hfa384x_dorrid_wait(hfa384x_t *hw, u16 rid, void *riddata,
		    unsigned int riddatalen)
{
	return hfa384x_dorrid(hw, DOWAIT,
			      rid, riddata, riddatalen, NULL, NULL, NULL);
}

static inline int
hfa384x_dorrid_async(hfa384x_t *hw,
		     u16 rid, void *riddata, unsigned int riddatalen,
		     ctlx_cmdcb_t cmdcb,
		     ctlx_usercb_t usercb, void *usercb_data)
{
	return hfa384x_dorrid(hw, DOASYNC,
			      rid, riddata, riddatalen,
			      cmdcb, usercb, usercb_data);
}

static inline int
hfa384x_dowrid_wait(hfa384x_t *hw, u16 rid, void *riddata,
		    unsigned int riddatalen)
{
	return hfa384x_dowrid(hw, DOWAIT,
			      rid, riddata, riddatalen, NULL, NULL, NULL);
}

static inline int
hfa384x_dowrid_async(hfa384x_t *hw,
		     u16 rid, void *riddata, unsigned int riddatalen,
		     ctlx_cmdcb_t cmdcb,
		     ctlx_usercb_t usercb, void *usercb_data)
{
	return hfa384x_dowrid(hw, DOASYNC,
			      rid, riddata, riddatalen,
			      cmdcb, usercb, usercb_data);
}

static inline int
hfa384x_dormem_wait(hfa384x_t *hw,
		    u16 page, u16 offset, void *data, unsigned int len)
{
	return hfa384x_dormem(hw, DOWAIT,
			      page, offset, data, len, NULL, NULL, NULL);
}

static inline int
hfa384x_dormem_async(hfa384x_t *hw,
		     u16 page, u16 offset, void *data, unsigned int len,
		     ctlx_cmdcb_t cmdcb,
		     ctlx_usercb_t usercb, void *usercb_data)
{
	return hfa384x_dormem(hw, DOASYNC,
			      page, offset, data, len,
			      cmdcb, usercb, usercb_data);
}

static inline int
hfa384x_dowmem_wait(hfa384x_t *hw,
		    u16 page, u16 offset, void *data, unsigned int len)
{
	return hfa384x_dowmem(hw, DOWAIT,
			      page, offset, data, len, NULL, NULL, NULL);
}

static inline int
hfa384x_dowmem_async(hfa384x_t *hw,
		     u16 page, u16 offset, void *data, unsigned int len,
		     ctlx_cmdcb_t cmdcb,
		     ctlx_usercb_t usercb, void *usercb_data)
{
	return hfa384x_dowmem(hw, DOASYNC,
			      page, offset, data, len,
			      cmdcb, usercb, usercb_data);
}

/*----------------------------------------------------------------
 * hfa384x_cmd_initialize
 *
 * Issues the initialize command and sets the hw->state based
 * on the result.
 *
 * Arguments:
 *	hw	device structure
 *
 * Returns:
 *	0	success
 *	>0	f/w reported error - f/w status code
 *	<0	driver reported error
 *
 * Side effects:
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
int hfa384x_cmd_initialize(hfa384x_t *hw)
{
	int result = 0;
	int i;
	hfa384x_metacmd_t cmd;

	cmd.cmd = HFA384x_CMDCODE_INIT;
	cmd.parm0 = 0;
	cmd.parm1 = 0;
	cmd.parm2 = 0;

	result = hfa384x_docmd_wait(hw, &cmd);

	pr_debug("cmdresp.init: status=0x%04x, resp0=0x%04x, resp1=0x%04x, resp2=0x%04x\n",
		 cmd.result.status,
		 cmd.result.resp0, cmd.result.resp1, cmd.result.resp2);

	/* A successful INIT disables every MAC port */
	if (result == 0) {
		for (i = 0; i < HFA384x_NUMPORTS_MAX; i++)
			hw->port_enabled[i] = 0;
	}

	hw->link_status = HFA384x_LINK_NOTCONNECTED;

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_cmd_disable
 *
 * Issues the disable command to stop communications on one of
 * the MACs 'ports'.
 *
 * Arguments:
 *	hw	device structure
 *	macport	MAC port number (host order)
 *
 * Returns:
 *	0	success
 *	>0	f/w reported failure - f/w status code
 *	<0	driver reported error (timeout|bad arg)
 *
 * Side effects:
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport)
{
	int result = 0;
	hfa384x_metacmd_t cmd;

	cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DISABLE) |
	    HFA384x_CMD_MACPORT_SET(macport);
	cmd.parm0 = 0;
	cmd.parm1 = 0;
	cmd.parm2 = 0;

	result = hfa384x_docmd_wait(hw, &cmd);

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_cmd_enable
 *
 * Issues the enable command to enable communications on one of
 * the MACs 'ports'.
 *
 * Arguments:
 *	hw	device structure
 *	macport	MAC port number
 *
 * Returns:
 *	0	success
 *	>0	f/w reported failure - f/w status code
 *	<0	driver reported error (timeout|bad arg)
 *
 * Side effects:
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport)
{
	int result = 0;
	hfa384x_metacmd_t cmd;

	cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_ENABLE) |
	    HFA384x_CMD_MACPORT_SET(macport);
	cmd.parm0 = 0;
	cmd.parm1 = 0;
	cmd.parm2 = 0;

	result = hfa384x_docmd_wait(hw, &cmd);

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_cmd_monitor
 *
 * Enables the 'monitor mode' of the MAC.  Here's the description of
 * monitor mode that I've received thus far:
 *
 *  "The "monitor mode" of operation is that the MAC passes all
 *  frames for which the PLCP checks are correct.  All received
 *  MPDUs are passed to the host with MAC Port = 7, with a
 *  receive status of good, FCS error, or undecryptable.  Passing
 *  certain MPDUs is a violation of the 802.11 standard, but useful
 *  for a debugging tool."  Normal communication is not possible
 *  while monitor mode is enabled.
 *
 * Arguments:
 *	hw	device structure
 *	enable	a code (0x0b|0x0f) that enables/disables
 *		monitor mode. (host order)
 *
 * Returns:
 *	0	success
 *	>0	f/w reported failure - f/w status code
 *	<0	driver reported error (timeout|bad arg)
 *
 * Side effects:
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable)
{
	int result = 0;
	hfa384x_metacmd_t cmd;

	cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_MONITOR) |
	    HFA384x_CMD_AINFO_SET(enable);
	cmd.parm0 = 0;
	cmd.parm1 = 0;
	cmd.parm2 = 0;

	result = hfa384x_docmd_wait(hw, &cmd);

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_cmd_download
 *
 * Sets the controls for the MAC controller code/data download
 * process.  The arguments set the mode and address associated
 * with a download.  Note that the aux registers should be enabled
 * prior to setting one of the download enable modes.
 *
 * Arguments:
 *	hw		device structure
 *	mode		0 - Disable programming and begin code exec
 *			1 - Enable volatile mem programming
 *			2 - Enable non-volatile mem programming
 *			3 - Program non-volatile section from NV download
 *			    buffer.
 *			(host order)
 *	lowaddr
 *	highaddr	For mode 1, sets the high & low order bits of
 *			the "destination address".  This address will be
 *			the execution start address when download is
 *			subsequently disabled.
 *			For mode 2, sets the high & low order bits of
 *			the destination in NV ram.
 *			For modes 0 & 3, should be zero. (host order)
 *			NOTE: these are CMD format.
 *	codelen		Length of the data to write in mode 2,
 *			zero otherwise. (host order)
 *
 * Returns:
 *	0	success
 *	>0	f/w reported failure - f/w status code
 *	<0	driver reported error (timeout|bad arg)
 *
 * Side effects:
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
int hfa384x_cmd_download(hfa384x_t *hw, u16 mode, u16 lowaddr,
			 u16 highaddr, u16 codelen)
{
	int result = 0;
	hfa384x_metacmd_t cmd;

	pr_debug("mode=%d, lowaddr=0x%04x, highaddr=0x%04x, codelen=%d\n",
		 mode, lowaddr, highaddr, codelen);

	cmd.cmd = (HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DOWNLD) |
		   HFA384x_CMD_PROGMODE_SET(mode));

	cmd.parm0 = lowaddr;
	cmd.parm1 = highaddr;
	cmd.parm2 = codelen;

	result = hfa384x_docmd_wait(hw, &cmd);

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_corereset
 *
 * Perform a reset of the hfa38xx MAC core.  We assume that the hw
 * structure is in its "created" state.  That is, it is initialized
 * with proper values.  Note that if a reset is done after the
 * device has been active for awhile, the caller might have to clean
 * up some leftover cruft in the hw structure.
 *
 * Arguments:
 *	hw		device structure
 *	holdtime	how long (in ms) to hold the reset
 *	settletime	how long (in ms) to wait after releasing
 *			the reset
 *
 * Returns:
 *	nothing
 *
 * Side effects:
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
int hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis)
{
	int result = 0;

	/* NOTE(review): holdtime/settletime/genesis are unused in this
	 * USB variant -- the whole device is reset via usb_reset_device().
	 * The parameters are kept for interface compatibility.
	 */
	result = usb_reset_device(hw->usb);
	if (result < 0) {
		netdev_err(hw->wlandev->netdev,
			   "usb_reset_device() failed, result=%d.\n", result);
	}

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_usbctlx_complete_sync
 *
 * Waits for a synchronous CTLX object to complete,
 * and then handles the response.
 *
 * Arguments:
 *	hw		device structure
 *	ctlx		CTLX ptr
 *	completor	functor object to decide what to
 *			do with the CTLX's result.
 *
 * Returns:
 *	0		Success
 *	-ERESTARTSYS	Interrupted by a signal
 *	-EIO		CTLX failed
 *	-ENODEV		Adapter was unplugged
 *	???		Result from completor
 *
 * Side effects:
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
static int hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
					 hfa384x_usbctlx_t *ctlx,
					 struct usbctlx_completor *completor)
{
	unsigned long flags;
	int result;

	result = wait_for_completion_interruptible(&ctlx->done);

	spin_lock_irqsave(&hw->ctlxq.lock, flags);

	/*
	 * We can only handle the CTLX if the USB disconnect
	 * function has not run yet ...
	 */
cleanup:
	if (hw->wlandev->hwremoved) {
		spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
		result = -ENODEV;
	} else if (result != 0) {
		int runqueue = 0;

		/*
		 * We were probably interrupted, so delete
		 * this CTLX asynchronously, kill the timers
		 * and the URB, and then start the next
		 * pending CTLX.
		 *
		 * NOTE: We can only delete the timers and
		 *       the URB if this CTLX is active.
		 */
		if (ctlx == get_active_ctlx(hw)) {
			/* Lock must be dropped around the timer/urb kills
			 * (their callbacks take this same lock), then the
			 * hwremoved state re-checked after re-acquiring.
			 */
			spin_unlock_irqrestore(&hw->ctlxq.lock, flags);

			del_singleshot_timer_sync(&hw->reqtimer);
			del_singleshot_timer_sync(&hw->resptimer);
			hw->req_timer_done = 1;
			hw->resp_timer_done = 1;
			usb_kill_urb(&hw->ctlx_urb);

			spin_lock_irqsave(&hw->ctlxq.lock, flags);

			runqueue = 1;

			/*
			 * This scenario is so unlikely that I'm
			 * happy with a grubby "goto" solution ...
			 */
			if (hw->wlandev->hwremoved)
				goto cleanup;
		}

		/*
		 * The completion task will send this CTLX
		 * to the reaper the next time it runs. We
		 * are no longer in a hurry.
		 */
		ctlx->reapable = 1;
		ctlx->state = CTLX_REQ_FAILED;
		list_move_tail(&ctlx->list, &hw->ctlxq.completing);

		spin_unlock_irqrestore(&hw->ctlxq.lock, flags);

		if (runqueue)
			hfa384x_usbctlxq_run(hw);
	} else {
		if (ctlx->state == CTLX_COMPLETE) {
			result = completor->complete(completor);
		} else {
			netdev_warn(hw->wlandev->netdev,
				    "CTLX[%d] error: state(%s)\n",
				    le16_to_cpu(ctlx->outbuf.type),
				    ctlxstr(ctlx->state));
			result = -EIO;
		}

		list_del(&ctlx->list);
		spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
		kfree(ctlx);
	}

	return result;
}

/*----------------------------------------------------------------
 * hfa384x_docmd
 *
 * Constructs a command CTLX and submits it.
 *
 * NOTE: Any changes to the 'post-submit' code in this function
 *       need to be carried over to hfa384x_cbcmd() since the handling
 *       is virtually identical.
 *
 * Arguments:
 *	hw		device structure
 *	mode		DOWAIT or DOASYNC
 *	cmd		cmd structure.  Includes all arguments and result
 *			data points.  All in host order. in host order
 *	cmdcb		command-specific callback
 *	usercb		user callback for async calls, NULL for DOWAIT calls
 *	usercb_data	user supplied data pointer for async calls, NULL
 *			for DOASYNC calls
 *
 * Returns:
 *	0		success
 *	-EIO		CTLX failure
 *	-ERESTARTSYS	Awakened on signal
 *	>0		command indicated error, Status and Resp0-2 are
 *			in hw structure.
 *
 * Side effects:
 *
 *
 * Call context:
 *	process
 ----------------------------------------------------------------*/
static int
hfa384x_docmd(hfa384x_t *hw,
	      enum cmd_mode mode,
	      hfa384x_metacmd_t *cmd,
	      ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
	int result;
	hfa384x_usbctlx_t *ctlx;

	ctlx = usbctlx_alloc();
	if (ctlx == NULL) {
		result = -ENOMEM;
		goto done;
	}

	/* Initialize the command */
	ctlx->outbuf.cmdreq.type = cpu_to_le16(HFA384x_USB_CMDREQ);
	ctlx->outbuf.cmdreq.cmd = cpu_to_le16(cmd->cmd);
	ctlx->outbuf.cmdreq.parm0 = cpu_to_le16(cmd->parm0);
	ctlx->outbuf.cmdreq.parm1 = cpu_to_le16(cmd->parm1);
	ctlx->outbuf.cmdreq.parm2 = cpu_to_le16(cmd->parm2);

	ctlx->outbufsize = sizeof(ctlx->outbuf.cmdreq);

	pr_debug("cmdreq: cmd=0x%04x parm0=0x%04x parm1=0x%04x parm2=0x%04x\n",
		 cmd->cmd, cmd->parm0, cmd->parm1, cmd->parm2);

	ctlx->reapable = mode;
	ctlx->cmdcb = cmdcb;
	ctlx->usercb = usercb;
	ctlx->usercb_data = usercb_data;

	result = hfa384x_usbctlx_submit(hw, ctlx);
	if (result != 0) {
		kfree(ctlx);
	} else if (mode == DOWAIT) {
		/* Block until the response arrives; the completor decodes
		 * the cmdresp into cmd->result.  On success, the CTLX is
		 * freed inside hfa384x_usbctlx_complete_sync().
		 */
		struct usbctlx_cmd_completor completor;

		result =
		    hfa384x_usbctlx_complete_sync(hw, ctlx,
						  init_cmd_completor(&completor,
								     &ctlx->
								     inbuf.
								     cmdresp,
								     &cmd->
								     result));
	}

done:
	return result;
}

/*----------------------------------------------------------------
 * hfa384x_dorrid
 *
 * Constructs a read rid CTLX and issues it.
 *
 * NOTE: Any changes to the 'post-submit' code in this function
 *       need to be carried over to hfa384x_cbrrid() since the handling
 *       is virtually identical.
 *
 * Arguments:
 *	hw		device structure
 *	mode		DOWAIT or DOASYNC
 *	rid		Read RID number (host order)
 *	riddata		Caller supplied buffer that MAC formatted RID.data
 *			record will be written to for DOWAIT calls. Should
 *			be NULL for DOASYNC calls.
 *	riddatalen	Buffer length for DOWAIT calls. Zero for DOASYNC calls.
 *	cmdcb		command callback for async calls, NULL for DOWAIT calls
 *	usercb		user callback for async calls, NULL for DOWAIT calls
 *	usercb_data	user supplied data pointer for async calls, NULL
 *			for DOWAIT calls
 *
 * Returns:
 *	0		success
 *	-EIO		CTLX failure
 *	-ERESTARTSYS	Awakened on signal
 *	-ENODATA	riddatalen != macdatalen
 *	>0		command indicated error, Status and Resp0-2 are
 *			in hw structure.
 *
 * Side effects:
 *
 * Call context:
 *	interrupt (DOASYNC)
 *	process (DOWAIT or DOASYNC)
 ----------------------------------------------------------------*/
static int
hfa384x_dorrid(hfa384x_t *hw,
	       enum cmd_mode mode,
	       u16 rid,
	       void *riddata,
	       unsigned int riddatalen,
	       ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
	int result;
	hfa384x_usbctlx_t *ctlx;

	ctlx = usbctlx_alloc();
	if (ctlx == NULL) {
		result = -ENOMEM;
		goto done;
	}

	/* Initialize the command */
	ctlx->outbuf.rridreq.type = cpu_to_le16(HFA384x_USB_RRIDREQ);
	/* frmlen counts only the RID word for a read request */
	ctlx->outbuf.rridreq.frmlen =
	    cpu_to_le16(sizeof(ctlx->outbuf.rridreq.rid));
	ctlx->outbuf.rridreq.rid = cpu_to_le16(rid);

	ctlx->outbufsize = sizeof(ctlx->outbuf.rridreq);

	ctlx->reapable = mode;
	ctlx->cmdcb = cmdcb;
	ctlx->usercb = usercb;
	ctlx->usercb_data = usercb_data;

	/* Submit the CTLX */
	result = hfa384x_usbctlx_submit(hw, ctlx);
	if (result != 0) {
		kfree(ctlx);
	} else if (mode == DOWAIT) {
		struct usbctlx_rrid_completor completor;

		result =
		    hfa384x_usbctlx_complete_sync(hw, ctlx,
						  init_rrid_completor
						  (&completor,
						   &ctlx->inbuf.rridresp,
						   riddata, riddatalen));
	}

done:
	return result;
}

/*----------------------------------------------------------------
 * hfa384x_dowrid
 *
 * Constructs a write rid CTLX and issues it.
 *
 * NOTE: Any changes to the 'post-submit' code in this function
 *       need to be carried over to hfa384x_cbwrid() since the handling
 *       is virtually identical.
 *
 * Arguments:
 *	hw		device structure
 *	enum cmd_mode	DOWAIT or DOASYNC
 *	rid		RID code
 *	riddata		Data portion of RID formatted for MAC
 *	riddatalen	Length of the data portion in bytes
 *	cmdcb		command callback for async calls, NULL for DOWAIT calls
 *	usercb		user callback for async calls, NULL for DOWAIT calls
 *	usercb_data	user supplied data pointer for async calls
 *
 * Returns:
 *	0		success
 *	-ETIMEDOUT	timed out waiting for register ready or
 *			command completion
 *	>0		command indicated error, Status and Resp0-2 are
 *			in hw structure.
 *
 * Side effects:
 *
 * Call context:
 *	interrupt (DOASYNC)
 *	process (DOWAIT or DOASYNC)
 ----------------------------------------------------------------*/
static int
hfa384x_dowrid(hfa384x_t *hw,
	       enum cmd_mode mode,
	       u16 rid,
	       void *riddata,
	       unsigned int riddatalen,
	       ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data)
{
	int result;
	hfa384x_usbctlx_t *ctlx;

	ctlx = usbctlx_alloc();
	if (ctlx == NULL) {
		result = -ENOMEM;
		goto done;
	}

	/* Initialize the command */
	ctlx->outbuf.wridreq.type = cpu_to_le16(HFA384x_USB_WRIDREQ);
	/* frmlen is in 16-bit words: RID word plus data, rounded up */
	ctlx->outbuf.wridreq.frmlen = cpu_to_le16((sizeof
						   (ctlx->outbuf.wridreq.rid) +
						   riddatalen + 1) / 2);
	ctlx->outbuf.wridreq.rid = cpu_to_le16(rid);
	memcpy(ctlx->outbuf.wridreq.data, riddata, riddatalen);

	ctlx->outbufsize = sizeof(ctlx->outbuf.wridreq.type) +
	    sizeof(ctlx->outbuf.wridreq.frmlen) +
	    sizeof(ctlx->outbuf.wridreq.rid) + riddatalen;

	ctlx->reapable = mode;
	ctlx->cmdcb = cmdcb;
	ctlx->usercb = usercb;
	ctlx->usercb_data = usercb_data;

	/* Submit the CTLX */
	result = hfa384x_usbctlx_submit(hw, ctlx);
	if (result != 0) {
		kfree(ctlx);
	} else if (mode == DOWAIT) {
		struct usbctlx_cmd_completor completor;
		hfa384x_cmdresult_t wridresult;

		result = hfa384x_usbctlx_complete_sync(hw,
						       ctlx,
						       init_wrid_completor
						       (&completor,
							&ctlx->inbuf.wridresp,
							&wridresult));
	}

done:
	return result;
}

/*----------------------------------------------------------------
 * hfa384x_dormem
 *
 * Constructs a readmem CTLX and issues it.
* * NOTE: Any changes to the 'post-submit' code in this function * need to be carried over to hfa384x_cbrmem() since the handling * is virtually identical. * * Arguments: * hw device structure * mode DOWAIT or DOASYNC * page MAC address space page (CMD format) * offset MAC address space offset * data Ptr to data buffer to receive read * len Length of the data to read (max == 2048) * cmdcb command callback for async calls, NULL for DOWAIT calls * usercb user callback for async calls, NULL for DOWAIT calls * usercb_data user supplied data pointer for async calls * * Returns: * 0 success * -ETIMEDOUT timed out waiting for register ready or * command completion * >0 command indicated error, Status and Resp0-2 are * in hw structure. * * Side effects: * * Call context: * interrupt (DOASYNC) * process (DOWAIT or DOASYNC) ----------------------------------------------------------------*/ static int hfa384x_dormem(hfa384x_t *hw, enum cmd_mode mode, u16 page, u16 offset, void *data, unsigned int len, ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data) { int result; hfa384x_usbctlx_t *ctlx; ctlx = usbctlx_alloc(); if (ctlx == NULL) { result = -ENOMEM; goto done; } /* Initialize the command */ ctlx->outbuf.rmemreq.type = cpu_to_le16(HFA384x_USB_RMEMREQ); ctlx->outbuf.rmemreq.frmlen = cpu_to_le16(sizeof(ctlx->outbuf.rmemreq.offset) + sizeof(ctlx->outbuf.rmemreq.page) + len); ctlx->outbuf.rmemreq.offset = cpu_to_le16(offset); ctlx->outbuf.rmemreq.page = cpu_to_le16(page); ctlx->outbufsize = sizeof(ctlx->outbuf.rmemreq); pr_debug("type=0x%04x frmlen=%d offset=0x%04x page=0x%04x\n", ctlx->outbuf.rmemreq.type, ctlx->outbuf.rmemreq.frmlen, ctlx->outbuf.rmemreq.offset, ctlx->outbuf.rmemreq.page); pr_debug("pktsize=%zd\n", ROUNDUP64(sizeof(ctlx->outbuf.rmemreq))); ctlx->reapable = mode; ctlx->cmdcb = cmdcb; ctlx->usercb = usercb; ctlx->usercb_data = usercb_data; result = hfa384x_usbctlx_submit(hw, ctlx); if (result != 0) { kfree(ctlx); } else if (mode == DOWAIT) { struct 
usbctlx_rmem_completor completor; result = hfa384x_usbctlx_complete_sync(hw, ctlx, init_rmem_completor (&completor, &ctlx->inbuf.rmemresp, data, len)); } done: return result; } /*---------------------------------------------------------------- * hfa384x_dowmem * * Constructs a writemem CTLX and issues it. * * NOTE: Any changes to the 'post-submit' code in this function * need to be carried over to hfa384x_cbwmem() since the handling * is virtually identical. * * Arguments: * hw device structure * mode DOWAIT or DOASYNC * page MAC address space page (CMD format) * offset MAC address space offset * data Ptr to data buffer containing write data * len Length of the data to read (max == 2048) * cmdcb command callback for async calls, NULL for DOWAIT calls * usercb user callback for async calls, NULL for DOWAIT calls * usercb_data user supplied data pointer for async calls. * * Returns: * 0 success * -ETIMEDOUT timed out waiting for register ready or * command completion * >0 command indicated error, Status and Resp0-2 are * in hw structure. 
* * Side effects: * * Call context: * interrupt (DOWAIT) * process (DOWAIT or DOASYNC) ----------------------------------------------------------------*/ static int hfa384x_dowmem(hfa384x_t *hw, enum cmd_mode mode, u16 page, u16 offset, void *data, unsigned int len, ctlx_cmdcb_t cmdcb, ctlx_usercb_t usercb, void *usercb_data) { int result; hfa384x_usbctlx_t *ctlx; pr_debug("page=0x%04x offset=0x%04x len=%d\n", page, offset, len); ctlx = usbctlx_alloc(); if (ctlx == NULL) { result = -ENOMEM; goto done; } /* Initialize the command */ ctlx->outbuf.wmemreq.type = cpu_to_le16(HFA384x_USB_WMEMREQ); ctlx->outbuf.wmemreq.frmlen = cpu_to_le16(sizeof(ctlx->outbuf.wmemreq.offset) + sizeof(ctlx->outbuf.wmemreq.page) + len); ctlx->outbuf.wmemreq.offset = cpu_to_le16(offset); ctlx->outbuf.wmemreq.page = cpu_to_le16(page); memcpy(ctlx->outbuf.wmemreq.data, data, len); ctlx->outbufsize = sizeof(ctlx->outbuf.wmemreq.type) + sizeof(ctlx->outbuf.wmemreq.frmlen) + sizeof(ctlx->outbuf.wmemreq.offset) + sizeof(ctlx->outbuf.wmemreq.page) + len; ctlx->reapable = mode; ctlx->cmdcb = cmdcb; ctlx->usercb = usercb; ctlx->usercb_data = usercb_data; result = hfa384x_usbctlx_submit(hw, ctlx); if (result != 0) { kfree(ctlx); } else if (mode == DOWAIT) { struct usbctlx_cmd_completor completor; hfa384x_cmdresult_t wmemresult; result = hfa384x_usbctlx_complete_sync(hw, ctlx, init_wmem_completor (&completor, &ctlx->inbuf.wmemresp, &wmemresult)); } done: return result; } /*---------------------------------------------------------------- * hfa384x_drvr_commtallies * * Send a commtallies inquiry to the MAC. Note that this is an async * call that will result in an info frame arriving sometime later. * * Arguments: * hw device structure * * Returns: * zero success. 
* * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_commtallies(hfa384x_t *hw) { hfa384x_metacmd_t cmd; cmd.cmd = HFA384x_CMDCODE_INQ; cmd.parm0 = HFA384x_IT_COMMTALLIES; cmd.parm1 = 0; cmd.parm2 = 0; hfa384x_docmd_async(hw, &cmd, NULL, NULL, NULL); return 0; } /*---------------------------------------------------------------- * hfa384x_drvr_disable * * Issues the disable command to stop communications on one of * the MACs 'ports'. Only macport 0 is valid for stations. * APs may also disable macports 1-6. Only ports that have been * previously enabled may be disabled. * * Arguments: * hw device structure * macport MAC port number (host order) * * Returns: * 0 success * >0 f/w reported failure - f/w status code * <0 driver reported error (timeout|bad arg) * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_disable(hfa384x_t *hw, u16 macport) { int result = 0; if ((!hw->isap && macport != 0) || (hw->isap && !(macport <= HFA384x_PORTID_MAX)) || !(hw->port_enabled[macport])) { result = -EINVAL; } else { result = hfa384x_cmd_disable(hw, macport); if (result == 0) hw->port_enabled[macport] = 0; } return result; } /*---------------------------------------------------------------- * hfa384x_drvr_enable * * Issues the enable command to enable communications on one of * the MACs 'ports'. Only macport 0 is valid for stations. * APs may also enable macports 1-6. Only ports that are currently * disabled may be enabled. 
* * Arguments: * hw device structure * macport MAC port number * * Returns: * 0 success * >0 f/w reported failure - f/w status code * <0 driver reported error (timeout|bad arg) * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_enable(hfa384x_t *hw, u16 macport) { int result = 0; if ((!hw->isap && macport != 0) || (hw->isap && !(macport <= HFA384x_PORTID_MAX)) || (hw->port_enabled[macport])) { result = -EINVAL; } else { result = hfa384x_cmd_enable(hw, macport); if (result == 0) hw->port_enabled[macport] = 1; } return result; } /*---------------------------------------------------------------- * hfa384x_drvr_flashdl_enable * * Begins the flash download state. Checks to see that we're not * already in a download state and that a port isn't enabled. * Sets the download state and retrieves the flash download * buffer location, buffer size, and timeout length. * * Arguments: * hw device structure * * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_flashdl_enable(hfa384x_t *hw) { int result = 0; int i; /* Check that a port isn't active */ for (i = 0; i < HFA384x_PORTID_MAX; i++) { if (hw->port_enabled[i]) { pr_debug("called when port enabled.\n"); return -EINVAL; } } /* Check that we're not already in a download state */ if (hw->dlstate != HFA384x_DLSTATE_DISABLED) return -EINVAL; /* Retrieve the buffer loc&size and timeout */ result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER, &(hw->bufinfo), sizeof(hw->bufinfo)); if (result) return result; hw->bufinfo.page = le16_to_cpu(hw->bufinfo.page); hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset); hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len); result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME, &(hw->dltimeout)); if (result) return result; hw->dltimeout = 
le16_to_cpu(hw->dltimeout); pr_debug("flashdl_enable\n"); hw->dlstate = HFA384x_DLSTATE_FLASHENABLED; return result; } /*---------------------------------------------------------------- * hfa384x_drvr_flashdl_disable * * Ends the flash download state. Note that this will cause the MAC * firmware to restart. * * Arguments: * hw device structure * * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_flashdl_disable(hfa384x_t *hw) { /* Check that we're already in the download state */ if (hw->dlstate != HFA384x_DLSTATE_FLASHENABLED) return -EINVAL; pr_debug("flashdl_enable\n"); /* There isn't much we can do at this point, so I don't */ /* bother w/ the return value */ hfa384x_cmd_download(hw, HFA384x_PROGMODE_DISABLE, 0, 0, 0); hw->dlstate = HFA384x_DLSTATE_DISABLED; return 0; } /*---------------------------------------------------------------- * hfa384x_drvr_flashdl_write * * Performs a FLASH download of a chunk of data. First checks to see * that we're in the FLASH download state, then sets the download * mode, uses the aux functions to 1) copy the data to the flash * buffer, 2) sets the download 'write flash' mode, 3) readback and * compare. Lather rinse, repeat as many times an necessary to get * all the given data into flash. * When all data has been written using this function (possibly * repeatedly), call drvr_flashdl_disable() to end the download state * and restart the MAC. * * Arguments: * hw device structure * daddr Card address to write to. (host order) * buf Ptr to data to write. * len Length of data (host order). 
* * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_flashdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len) { int result = 0; u32 dlbufaddr; int nburns; u32 burnlen; u32 burndaddr; u16 burnlo; u16 burnhi; int nwrites; u8 *writebuf; u16 writepage; u16 writeoffset; u32 writelen; int i; int j; pr_debug("daddr=0x%08x len=%d\n", daddr, len); /* Check that we're in the flash download state */ if (hw->dlstate != HFA384x_DLSTATE_FLASHENABLED) return -EINVAL; netdev_info(hw->wlandev->netdev, "Download %d bytes to flash @0x%06x\n", len, daddr); /* Convert to flat address for arithmetic */ /* NOTE: dlbuffer RID stores the address in AUX format */ dlbufaddr = HFA384x_ADDR_AUX_MKFLAT(hw->bufinfo.page, hw->bufinfo.offset); pr_debug("dlbuf.page=0x%04x dlbuf.offset=0x%04x dlbufaddr=0x%08x\n", hw->bufinfo.page, hw->bufinfo.offset, dlbufaddr); /* Calculations to determine how many fills of the dlbuffer to do * and how many USB wmemreq's to do for each fill. At this point * in time, the dlbuffer size and the wmemreq size are the same. * Therefore, nwrites should always be 1. The extra complexity * here is a hedge against future changes. */ /* Figure out how many times to do the flash programming */ nburns = len / hw->bufinfo.len; nburns += (len % hw->bufinfo.len) ? 1 : 0; /* For each flash program cycle, how many USB wmemreq's are needed? */ nwrites = hw->bufinfo.len / HFA384x_USB_RWMEM_MAXLEN; nwrites += (hw->bufinfo.len % HFA384x_USB_RWMEM_MAXLEN) ? 1 : 0; /* For each burn */ for (i = 0; i < nburns; i++) { /* Get the dest address and len */ burnlen = (len - (hw->bufinfo.len * i)) > hw->bufinfo.len ? 
hw->bufinfo.len : (len - (hw->bufinfo.len * i)); burndaddr = daddr + (hw->bufinfo.len * i); burnlo = HFA384x_ADDR_CMD_MKOFF(burndaddr); burnhi = HFA384x_ADDR_CMD_MKPAGE(burndaddr); netdev_info(hw->wlandev->netdev, "Writing %d bytes to flash @0x%06x\n", burnlen, burndaddr); /* Set the download mode */ result = hfa384x_cmd_download(hw, HFA384x_PROGMODE_NV, burnlo, burnhi, burnlen); if (result) { netdev_err(hw->wlandev->netdev, "download(NV,lo=%x,hi=%x,len=%x) cmd failed, result=%d. Aborting d/l\n", burnlo, burnhi, burnlen, result); goto exit_proc; } /* copy the data to the flash download buffer */ for (j = 0; j < nwrites; j++) { writebuf = buf + (i * hw->bufinfo.len) + (j * HFA384x_USB_RWMEM_MAXLEN); writepage = HFA384x_ADDR_CMD_MKPAGE(dlbufaddr + (j * HFA384x_USB_RWMEM_MAXLEN)); writeoffset = HFA384x_ADDR_CMD_MKOFF(dlbufaddr + (j * HFA384x_USB_RWMEM_MAXLEN)); writelen = burnlen - (j * HFA384x_USB_RWMEM_MAXLEN); writelen = writelen > HFA384x_USB_RWMEM_MAXLEN ? HFA384x_USB_RWMEM_MAXLEN : writelen; result = hfa384x_dowmem_wait(hw, writepage, writeoffset, writebuf, writelen); } /* set the download 'write flash' mode */ result = hfa384x_cmd_download(hw, HFA384x_PROGMODE_NVWRITE, 0, 0, 0); if (result) { netdev_err(hw->wlandev->netdev, "download(NVWRITE,lo=%x,hi=%x,len=%x) cmd failed, result=%d. Aborting d/l\n", burnlo, burnhi, burnlen, result); goto exit_proc; } /* TODO: We really should do a readback and compare. */ } exit_proc: /* Leave the firmware in the 'post-prog' mode. flashdl_disable will */ /* actually disable programming mode. Remember, that will cause the */ /* the firmware to effectively reset itself. */ return result; } /*---------------------------------------------------------------- * hfa384x_drvr_getconfig * * Performs the sequence necessary to read a config/info item. * * Arguments: * hw device structure * rid config/info record id (host order) * buf host side record buffer. 
Upon return it will * contain the body portion of the record (minus the * RID and len). * len buffer length (in bytes, should match record length) * * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * -ENODATA length mismatch between argument and retrieved * record. * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_getconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len) { return hfa384x_dorrid_wait(hw, rid, buf, len); } /*---------------------------------------------------------------- * hfa384x_drvr_getconfig_async * * Performs the sequence necessary to perform an async read of * of a config/info item. * * Arguments: * hw device structure * rid config/info record id (host order) * buf host side record buffer. Upon return it will * contain the body portion of the record (minus the * RID and len). * len buffer length (in bytes, should match record length) * cbfn caller supplied callback, called when the command * is done (successful or not). * cbfndata pointer to some caller supplied data that will be * passed in as an argument to the cbfn. * * Returns: * nothing the cbfn gets a status argument identifying if * any errors occur. * Side effects: * Queues an hfa384x_usbcmd_t for subsequent execution. * * Call context: * Any ----------------------------------------------------------------*/ int hfa384x_drvr_getconfig_async(hfa384x_t *hw, u16 rid, ctlx_usercb_t usercb, void *usercb_data) { return hfa384x_dorrid_async(hw, rid, NULL, 0, hfa384x_cb_rrid, usercb, usercb_data); } /*---------------------------------------------------------------- * hfa384x_drvr_setconfig_async * * Performs the sequence necessary to write a config/info item. 
* * Arguments: * hw device structure * rid config/info record id (in host order) * buf host side record buffer * len buffer length (in bytes) * usercb completion callback * usercb_data completion callback argument * * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_setconfig_async(hfa384x_t *hw, u16 rid, void *buf, u16 len, ctlx_usercb_t usercb, void *usercb_data) { return hfa384x_dowrid_async(hw, rid, buf, len, hfa384x_cb_status, usercb, usercb_data); } /*---------------------------------------------------------------- * hfa384x_drvr_ramdl_disable * * Ends the ram download state. * * Arguments: * hw device structure * * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_ramdl_disable(hfa384x_t *hw) { /* Check that we're already in the download state */ if (hw->dlstate != HFA384x_DLSTATE_RAMENABLED) return -EINVAL; pr_debug("ramdl_disable()\n"); /* There isn't much we can do at this point, so I don't */ /* bother w/ the return value */ hfa384x_cmd_download(hw, HFA384x_PROGMODE_DISABLE, 0, 0, 0); hw->dlstate = HFA384x_DLSTATE_DISABLED; return 0; } /*---------------------------------------------------------------- * hfa384x_drvr_ramdl_enable * * Begins the ram download state. Checks to see that we're not * already in a download state and that a port isn't enabled. * Sets the download state and calls cmd_download with the * ENABLE_VOLATILE subcommand and the exeaddr argument. * * Arguments: * hw device structure * exeaddr the card execution address that will be * jumped to when ramdl_disable() is called * (host order). 
* * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_ramdl_enable(hfa384x_t *hw, u32 exeaddr) { int result = 0; u16 lowaddr; u16 hiaddr; int i; /* Check that a port isn't active */ for (i = 0; i < HFA384x_PORTID_MAX; i++) { if (hw->port_enabled[i]) { netdev_err(hw->wlandev->netdev, "Can't download with a macport enabled.\n"); return -EINVAL; } } /* Check that we're not already in a download state */ if (hw->dlstate != HFA384x_DLSTATE_DISABLED) { netdev_err(hw->wlandev->netdev, "Download state not disabled.\n"); return -EINVAL; } pr_debug("ramdl_enable, exeaddr=0x%08x\n", exeaddr); /* Call the download(1,addr) function */ lowaddr = HFA384x_ADDR_CMD_MKOFF(exeaddr); hiaddr = HFA384x_ADDR_CMD_MKPAGE(exeaddr); result = hfa384x_cmd_download(hw, HFA384x_PROGMODE_RAM, lowaddr, hiaddr, 0); if (result == 0) { /* Set the download state */ hw->dlstate = HFA384x_DLSTATE_RAMENABLED; } else { pr_debug("cmd_download(0x%04x, 0x%04x) failed, result=%d.\n", lowaddr, hiaddr, result); } return result; } /*---------------------------------------------------------------- * hfa384x_drvr_ramdl_write * * Performs a RAM download of a chunk of data. First checks to see * that we're in the RAM download state, then uses the [read|write]mem USB * commands to 1) copy the data, 2) readback and compare. The download * state is unaffected. When all data has been written using * this function, call drvr_ramdl_disable() to end the download state * and restart the MAC. * * Arguments: * hw device structure * daddr Card address to write to. (host order) * buf Ptr to data to write. * len Length of data (host order). 
* * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len) { int result = 0; int nwrites; u8 *data = buf; int i; u32 curraddr; u16 currpage; u16 curroffset; u16 currlen; /* Check that we're in the ram download state */ if (hw->dlstate != HFA384x_DLSTATE_RAMENABLED) return -EINVAL; netdev_info(hw->wlandev->netdev, "Writing %d bytes to ram @0x%06x\n", len, daddr); /* How many dowmem calls? */ nwrites = len / HFA384x_USB_RWMEM_MAXLEN; nwrites += len % HFA384x_USB_RWMEM_MAXLEN ? 1 : 0; /* Do blocking wmem's */ for (i = 0; i < nwrites; i++) { /* make address args */ curraddr = daddr + (i * HFA384x_USB_RWMEM_MAXLEN); currpage = HFA384x_ADDR_CMD_MKPAGE(curraddr); curroffset = HFA384x_ADDR_CMD_MKOFF(curraddr); currlen = len - (i * HFA384x_USB_RWMEM_MAXLEN); if (currlen > HFA384x_USB_RWMEM_MAXLEN) currlen = HFA384x_USB_RWMEM_MAXLEN; /* Do blocking ctlx */ result = hfa384x_dowmem_wait(hw, currpage, curroffset, data + (i * HFA384x_USB_RWMEM_MAXLEN), currlen); if (result) break; /* TODO: We really should have a readback. */ } return result; } /*---------------------------------------------------------------- * hfa384x_drvr_readpda * * Performs the sequence to read the PDA space. Note there is no * drvr_writepda() function. Writing a PDA is * generally implemented by a calling component via calls to * cmd_download and writing to the flash download buffer via the * aux regs. * * Arguments: * hw device structure * buf buffer to store PDA in * len buffer length * * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * -ETIMEDOUT timout waiting for the cmd regs to become * available, or waiting for the control reg * to indicate the Aux port is enabled. * -ENODATA the buffer does NOT contain a valid PDA. 
* Either the card PDA is bad, or the auxdata * reads are giving us garbage. * * Side effects: * * Call context: * process or non-card interrupt. ----------------------------------------------------------------*/ int hfa384x_drvr_readpda(hfa384x_t *hw, void *buf, unsigned int len) { int result = 0; u16 *pda = buf; int pdaok = 0; int morepdrs = 1; int currpdr = 0; /* word offset of the current pdr */ size_t i; u16 pdrlen; /* pdr length in bytes, host order */ u16 pdrcode; /* pdr code, host order */ u16 currpage; u16 curroffset; struct pdaloc { u32 cardaddr; u16 auxctl; } pdaloc[] = { { HFA3842_PDA_BASE, 0}, { HFA3841_PDA_BASE, 0}, { HFA3841_PDA_BOGUS_BASE, 0} }; /* Read the pda from each known address. */ for (i = 0; i < ARRAY_SIZE(pdaloc); i++) { /* Make address */ currpage = HFA384x_ADDR_CMD_MKPAGE(pdaloc[i].cardaddr); curroffset = HFA384x_ADDR_CMD_MKOFF(pdaloc[i].cardaddr); /* units of bytes */ result = hfa384x_dormem_wait(hw, currpage, curroffset, buf, len); if (result) { netdev_warn(hw->wlandev->netdev, "Read from index %zd failed, continuing\n", i); continue; } /* Test for garbage */ pdaok = 1; /* initially assume good */ morepdrs = 1; while (pdaok && morepdrs) { pdrlen = le16_to_cpu(pda[currpdr]) * 2; pdrcode = le16_to_cpu(pda[currpdr + 1]); /* Test the record length */ if (pdrlen > HFA384x_PDR_LEN_MAX || pdrlen == 0) { netdev_err(hw->wlandev->netdev, "pdrlen invalid=%d\n", pdrlen); pdaok = 0; break; } /* Test the code */ if (!hfa384x_isgood_pdrcode(pdrcode)) { netdev_err(hw->wlandev->netdev, "pdrcode invalid=%d\n", pdrcode); pdaok = 0; break; } /* Test for completion */ if (pdrcode == HFA384x_PDR_END_OF_PDA) morepdrs = 0; /* Move to the next pdr (if necessary) */ if (morepdrs) { /* note the access to pda[], need words here */ currpdr += le16_to_cpu(pda[currpdr]) + 1; } } if (pdaok) { netdev_info(hw->wlandev->netdev, "PDA Read from 0x%08x in %s space.\n", pdaloc[i].cardaddr, pdaloc[i].auxctl == 0 ? "EXTDS" : pdaloc[i].auxctl == 1 ? 
"NV" : pdaloc[i].auxctl == 2 ? "PHY" : pdaloc[i].auxctl == 3 ? "ICSRAM" : "<bogus auxctl>"); break; } } result = pdaok ? 0 : -ENODATA; if (result) pr_debug("Failure: pda is not okay\n"); return result; } /*---------------------------------------------------------------- * hfa384x_drvr_setconfig * * Performs the sequence necessary to write a config/info item. * * Arguments: * hw device structure * rid config/info record id (in host order) * buf host side record buffer * len buffer length (in bytes) * * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_setconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len) { return hfa384x_dowrid_wait(hw, rid, buf, len); } /*---------------------------------------------------------------- * hfa384x_drvr_start * * Issues the MAC initialize command, sets up some data structures, * and enables the interrupts. After this function completes, the * low-level stuff should be ready for any/all commands. * * Arguments: * hw device structure * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_start(hfa384x_t *hw) { int result, result1, result2; u16 status; might_sleep(); /* Clear endpoint stalls - but only do this if the endpoint * is showing a stall status. 
Some prism2 cards seem to behave * badly if a clear_halt is called when the endpoint is already * ok */ result = usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in, &status); if (result < 0) { netdev_err(hw->wlandev->netdev, "Cannot get bulk in endpoint status.\n"); goto done; } if ((status == 1) && usb_clear_halt(hw->usb, hw->endp_in)) netdev_err(hw->wlandev->netdev, "Failed to reset bulk in endpoint.\n"); result = usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out, &status); if (result < 0) { netdev_err(hw->wlandev->netdev, "Cannot get bulk out endpoint status.\n"); goto done; } if ((status == 1) && usb_clear_halt(hw->usb, hw->endp_out)) netdev_err(hw->wlandev->netdev, "Failed to reset bulk out endpoint.\n"); /* Synchronous unlink, in case we're trying to restart the driver */ usb_kill_urb(&hw->rx_urb); /* Post the IN urb */ result = submit_rx_urb(hw, GFP_KERNEL); if (result != 0) { netdev_err(hw->wlandev->netdev, "Fatal, failed to submit RX URB, result=%d\n", result); goto done; } /* Call initialize twice, with a 1 second sleep in between. * This is a nasty work-around since many prism2 cards seem to * need time to settle after an init from cold. The second * call to initialize in theory is not necessary - but we call * it anyway as a double insurance policy: * 1) If the first init should fail, the second may well succeed * and the card can still be used * 2) It helps ensures all is well with the card after the first * init and settle time. */ result1 = hfa384x_cmd_initialize(hw); msleep(1000); result = hfa384x_cmd_initialize(hw); result2 = result; if (result1 != 0) { if (result2 != 0) { netdev_err(hw->wlandev->netdev, "cmd_initialize() failed on two attempts, results %d and %d\n", result1, result2); usb_kill_urb(&hw->rx_urb); goto done; } else { pr_debug("First cmd_initialize() failed (result %d),\n", result1); pr_debug("but second attempt succeeded. 
All should be ok\n"); } } else if (result2 != 0) { netdev_warn(hw->wlandev->netdev, "First cmd_initialize() succeeded, but second attempt failed (result=%d)\n", result2); netdev_warn(hw->wlandev->netdev, "Most likely the card will be functional\n"); goto done; } hw->state = HFA384x_STATE_RUNNING; done: return result; } /*---------------------------------------------------------------- * hfa384x_drvr_stop * * Shuts down the MAC to the point where it is safe to unload the * driver. Any subsystem that may be holding a data or function * ptr into the driver must be cleared/deinitialized. * * Arguments: * hw device structure * Returns: * 0 success * >0 f/w reported error - f/w status code * <0 driver reported error * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ int hfa384x_drvr_stop(hfa384x_t *hw) { int i; might_sleep(); /* There's no need for spinlocks here. The USB "disconnect" * function sets this "removed" flag and then calls us. */ if (!hw->wlandev->hwremoved) { /* Call initialize to leave the MAC in its 'reset' state */ hfa384x_cmd_initialize(hw); /* Cancel the rxurb */ usb_kill_urb(&hw->rx_urb); } hw->link_status = HFA384x_LINK_NOTCONNECTED; hw->state = HFA384x_STATE_INIT; del_timer_sync(&hw->commsqual_timer); /* Clear all the port status */ for (i = 0; i < HFA384x_NUMPORTS_MAX; i++) hw->port_enabled[i] = 0; return 0; } /*---------------------------------------------------------------- * hfa384x_drvr_txframe * * Takes a frame from prism2sta and queues it for transmission. * * Arguments: * hw device structure * skb packet buffer struct. Contains an 802.11 * data frame. * p80211_hdr points to the 802.11 header for the packet. 
* Returns: * 0 Success and more buffs available * 1 Success but no more buffs * 2 Allocation failure * 4 Buffer full or queue busy * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ int hfa384x_drvr_txframe(hfa384x_t *hw, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep) { int usbpktlen = sizeof(hfa384x_tx_frame_t); int result; int ret; char *ptr; if (hw->tx_urb.status == -EINPROGRESS) { netdev_warn(hw->wlandev->netdev, "TX URB already in use\n"); result = 3; goto exit; } /* Build Tx frame structure */ /* Set up the control field */ memset(&hw->txbuff.txfrm.desc, 0, sizeof(hw->txbuff.txfrm.desc)); /* Setup the usb type field */ hw->txbuff.type = cpu_to_le16(HFA384x_USB_TXFRM); /* Set up the sw_support field to identify this frame */ hw->txbuff.txfrm.desc.sw_support = 0x0123; /* Tx complete and Tx exception disable per dleach. Might be causing * buf depletion */ /* #define DOEXC SLP -- doboth breaks horribly under load, doexc less so. 
*/ #if defined(DOBOTH) hw->txbuff.txfrm.desc.tx_control = HFA384x_TX_MACPORT_SET(0) | HFA384x_TX_STRUCTYPE_SET(1) | HFA384x_TX_TXEX_SET(1) | HFA384x_TX_TXOK_SET(1); #elif defined(DOEXC) hw->txbuff.txfrm.desc.tx_control = HFA384x_TX_MACPORT_SET(0) | HFA384x_TX_STRUCTYPE_SET(1) | HFA384x_TX_TXEX_SET(1) | HFA384x_TX_TXOK_SET(0); #else hw->txbuff.txfrm.desc.tx_control = HFA384x_TX_MACPORT_SET(0) | HFA384x_TX_STRUCTYPE_SET(1) | HFA384x_TX_TXEX_SET(0) | HFA384x_TX_TXOK_SET(0); #endif hw->txbuff.txfrm.desc.tx_control = cpu_to_le16(hw->txbuff.txfrm.desc.tx_control); /* copy the header over to the txdesc */ memcpy(&(hw->txbuff.txfrm.desc.frame_control), p80211_hdr, sizeof(union p80211_hdr)); /* if we're using host WEP, increase size by IV+ICV */ if (p80211_wep->data) { hw->txbuff.txfrm.desc.data_len = cpu_to_le16(skb->len + 8); usbpktlen += 8; } else { hw->txbuff.txfrm.desc.data_len = cpu_to_le16(skb->len); } usbpktlen += skb->len; /* copy over the WEP IV if we are using host WEP */ ptr = hw->txbuff.txfrm.data; if (p80211_wep->data) { memcpy(ptr, p80211_wep->iv, sizeof(p80211_wep->iv)); ptr += sizeof(p80211_wep->iv); memcpy(ptr, p80211_wep->data, skb->len); } else { memcpy(ptr, skb->data, skb->len); } /* copy over the packet data */ ptr += skb->len; /* copy over the WEP ICV if we are using host WEP */ if (p80211_wep->data) memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv)); /* Send the USB packet */ usb_fill_bulk_urb(&(hw->tx_urb), hw->usb, hw->endp_out, &(hw->txbuff), ROUNDUP64(usbpktlen), hfa384x_usbout_callback, hw->wlandev); hw->tx_urb.transfer_flags |= USB_QUEUE_BULK; result = 1; ret = submit_tx_urb(hw, &hw->tx_urb, GFP_ATOMIC); if (ret != 0) { netdev_err(hw->wlandev->netdev, "submit_tx_urb() failed, error=%d\n", ret); result = 3; } exit: return result; } void hfa384x_tx_timeout(wlandevice_t *wlandev) { hfa384x_t *hw = wlandev->priv; unsigned long flags; spin_lock_irqsave(&hw->ctlxq.lock, flags); if (!hw->wlandev->hwremoved) { int sched; sched = 
!test_and_set_bit(WORK_TX_HALT, &hw->usb_flags); sched |= !test_and_set_bit(WORK_RX_HALT, &hw->usb_flags); if (sched) schedule_work(&hw->usb_work); } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); } /*---------------------------------------------------------------- * hfa384x_usbctlx_reaper_task * * Tasklet to delete dead CTLX objects * * Arguments: * data ptr to a hfa384x_t * * Returns: * * Call context: * Interrupt ----------------------------------------------------------------*/ static void hfa384x_usbctlx_reaper_task(unsigned long data) { hfa384x_t *hw = (hfa384x_t *)data; struct list_head *entry; struct list_head *temp; unsigned long flags; spin_lock_irqsave(&hw->ctlxq.lock, flags); /* This list is guaranteed to be empty if someone * has unplugged the adapter. */ list_for_each_safe(entry, temp, &hw->ctlxq.reapable) { hfa384x_usbctlx_t *ctlx; ctlx = list_entry(entry, hfa384x_usbctlx_t, list); list_del(&ctlx->list); kfree(ctlx); } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); } /*---------------------------------------------------------------- * hfa384x_usbctlx_completion_task * * Tasklet to call completion handlers for returned CTLXs * * Arguments: * data ptr to hfa384x_t * * Returns: * Nothing * * Call context: * Interrupt ----------------------------------------------------------------*/ static void hfa384x_usbctlx_completion_task(unsigned long data) { hfa384x_t *hw = (hfa384x_t *)data; struct list_head *entry; struct list_head *temp; unsigned long flags; int reap = 0; spin_lock_irqsave(&hw->ctlxq.lock, flags); /* This list is guaranteed to be empty if someone * has unplugged the adapter ... */ list_for_each_safe(entry, temp, &hw->ctlxq.completing) { hfa384x_usbctlx_t *ctlx; ctlx = list_entry(entry, hfa384x_usbctlx_t, list); /* Call the completion function that this * command was assigned, assuming it has one. 
*/ if (ctlx->cmdcb != NULL) { spin_unlock_irqrestore(&hw->ctlxq.lock, flags); ctlx->cmdcb(hw, ctlx); spin_lock_irqsave(&hw->ctlxq.lock, flags); /* Make sure we don't try and complete * this CTLX more than once! */ ctlx->cmdcb = NULL; /* Did someone yank the adapter out * while our list was (briefly) unlocked? */ if (hw->wlandev->hwremoved) { reap = 0; break; } } /* * "Reapable" CTLXs are ones which don't have any * threads waiting for them to die. Hence they must * be delivered to The Reaper! */ if (ctlx->reapable) { /* Move the CTLX off the "completing" list (hopefully) * on to the "reapable" list where the reaper task * can find it. And "reapable" means that this CTLX * isn't sitting on a wait-queue somewhere. */ list_move_tail(&ctlx->list, &hw->ctlxq.reapable); reap = 1; } complete(&ctlx->done); } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); if (reap) tasklet_schedule(&hw->reaper_bh); } /*---------------------------------------------------------------- * unlocked_usbctlx_cancel_async * * Mark the CTLX dead asynchronously, and ensure that the * next command on the queue is run afterwards. * * Arguments: * hw ptr to the hfa384x_t structure * ctlx ptr to a CTLX structure * * Returns: * 0 the CTLX's URB is inactive * -EINPROGRESS the URB is currently being unlinked * * Call context: * Either process or interrupt, but presumably interrupt ----------------------------------------------------------------*/ static int unlocked_usbctlx_cancel_async(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx) { int ret; /* * Try to delete the URB containing our request packet. * If we succeed, then its completion handler will be * called with a status of -ECONNRESET. */ hw->ctlx_urb.transfer_flags |= URB_ASYNC_UNLINK; ret = usb_unlink_urb(&hw->ctlx_urb); if (ret != -EINPROGRESS) { /* * The OUT URB had either already completed * or was still in the pending queue, so the * URB's completion function will not be called. * We will have to complete the CTLX ourselves. 
*/ ctlx->state = CTLX_REQ_FAILED; unlocked_usbctlx_complete(hw, ctlx); ret = 0; } return ret; } /*---------------------------------------------------------------- * unlocked_usbctlx_complete * * A CTLX has completed. It may have been successful, it may not * have been. At this point, the CTLX should be quiescent. The URBs * aren't active and the timers should have been stopped. * * The CTLX is migrated to the "completing" queue, and the completing * tasklet is scheduled. * * Arguments: * hw ptr to a hfa384x_t structure * ctlx ptr to a ctlx structure * * Returns: * nothing * * Side effects: * * Call context: * Either, assume interrupt ----------------------------------------------------------------*/ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx) { /* Timers have been stopped, and ctlx should be in * a terminal state. Retire it from the "active" * queue. */ list_move_tail(&ctlx->list, &hw->ctlxq.completing); tasklet_schedule(&hw->completion_bh); switch (ctlx->state) { case CTLX_COMPLETE: case CTLX_REQ_FAILED: /* This are the correct terminating states. */ break; default: netdev_err(hw->wlandev->netdev, "CTLX[%d] not in a terminating state(%s)\n", le16_to_cpu(ctlx->outbuf.type), ctlxstr(ctlx->state)); break; } /* switch */ } /*---------------------------------------------------------------- * hfa384x_usbctlxq_run * * Checks to see if the head item is running. If not, starts it. * * Arguments: * hw ptr to hfa384x_t * * Returns: * nothing * * Side effects: * * Call context: * any ----------------------------------------------------------------*/ static void hfa384x_usbctlxq_run(hfa384x_t *hw) { unsigned long flags; /* acquire lock */ spin_lock_irqsave(&hw->ctlxq.lock, flags); /* Only one active CTLX at any one time, because there's no * other (reliable) way to match the response URB to the * correct CTLX. * * Don't touch any of these CTLXs if the hardware * has been removed or the USB subsystem is stalled. 
*/ if (!list_empty(&hw->ctlxq.active) || test_bit(WORK_TX_HALT, &hw->usb_flags) || hw->wlandev->hwremoved) goto unlock; while (!list_empty(&hw->ctlxq.pending)) { hfa384x_usbctlx_t *head; int result; /* This is the first pending command */ head = list_entry(hw->ctlxq.pending.next, hfa384x_usbctlx_t, list); /* We need to split this off to avoid a race condition */ list_move_tail(&head->list, &hw->ctlxq.active); /* Fill the out packet */ usb_fill_bulk_urb(&(hw->ctlx_urb), hw->usb, hw->endp_out, &(head->outbuf), ROUNDUP64(head->outbufsize), hfa384x_ctlxout_callback, hw); hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK; /* Now submit the URB and update the CTLX's state */ result = SUBMIT_URB(&hw->ctlx_urb, GFP_ATOMIC); if (result == 0) { /* This CTLX is now running on the active queue */ head->state = CTLX_REQ_SUBMITTED; /* Start the OUT wait timer */ hw->req_timer_done = 0; hw->reqtimer.expires = jiffies + HZ; add_timer(&hw->reqtimer); /* Start the IN wait timer */ hw->resp_timer_done = 0; hw->resptimer.expires = jiffies + 2 * HZ; add_timer(&hw->resptimer); break; } if (result == -EPIPE) { /* The OUT pipe needs resetting, so put * this CTLX back in the "pending" queue * and schedule a reset ... */ netdev_warn(hw->wlandev->netdev, "%s tx pipe stalled: requesting reset\n", hw->wlandev->netdev->name); list_move(&head->list, &hw->ctlxq.pending); set_bit(WORK_TX_HALT, &hw->usb_flags); schedule_work(&hw->usb_work); break; } if (result == -ESHUTDOWN) { netdev_warn(hw->wlandev->netdev, "%s urb shutdown!\n", hw->wlandev->netdev->name); break; } netdev_err(hw->wlandev->netdev, "Failed to submit CTLX[%d]: error=%d\n", le16_to_cpu(head->outbuf.type), result); unlocked_usbctlx_complete(hw, head); } /* while */ unlock: spin_unlock_irqrestore(&hw->ctlxq.lock, flags); } /*---------------------------------------------------------------- * hfa384x_usbin_callback * * Callback for URBs on the BULKIN endpoint. 
* * Arguments: * urb ptr to the completed urb * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbin_callback(struct urb *urb) { wlandevice_t *wlandev = urb->context; hfa384x_t *hw; hfa384x_usbin_t *usbin = (hfa384x_usbin_t *)urb->transfer_buffer; struct sk_buff *skb = NULL; int result; int urb_status; u16 type; enum USBIN_ACTION { HANDLE, RESUBMIT, ABORT } action; if (!wlandev || !wlandev->netdev || wlandev->hwremoved) goto exit; hw = wlandev->priv; if (!hw) goto exit; skb = hw->rx_urb_skb; BUG_ON(!skb || (skb->data != urb->transfer_buffer)); hw->rx_urb_skb = NULL; /* Check for error conditions within the URB */ switch (urb->status) { case 0: action = HANDLE; /* Check for short packet */ if (urb->actual_length == 0) { ++(wlandev->linux_stats.rx_errors); ++(wlandev->linux_stats.rx_length_errors); action = RESUBMIT; } break; case -EPIPE: netdev_warn(hw->wlandev->netdev, "%s rx pipe stalled: requesting reset\n", wlandev->netdev->name); if (!test_and_set_bit(WORK_RX_HALT, &hw->usb_flags)) schedule_work(&hw->usb_work); ++(wlandev->linux_stats.rx_errors); action = ABORT; break; case -EILSEQ: case -ETIMEDOUT: case -EPROTO: if (!test_and_set_bit(THROTTLE_RX, &hw->usb_flags) && !timer_pending(&hw->throttle)) { mod_timer(&hw->throttle, jiffies + THROTTLE_JIFFIES); } ++(wlandev->linux_stats.rx_errors); action = ABORT; break; case -EOVERFLOW: ++(wlandev->linux_stats.rx_over_errors); action = RESUBMIT; break; case -ENODEV: case -ESHUTDOWN: pr_debug("status=%d, device removed.\n", urb->status); action = ABORT; break; case -ENOENT: case -ECONNRESET: pr_debug("status=%d, urb explicitly unlinked.\n", urb->status); action = ABORT; break; default: pr_debug("urb status=%d, transfer flags=0x%x\n", urb->status, urb->transfer_flags); ++(wlandev->linux_stats.rx_errors); action = RESUBMIT; break; } urb_status = urb->status; if (action != ABORT) { /* Repost the RX URB */ result = 
submit_rx_urb(hw, GFP_ATOMIC); if (result != 0) { netdev_err(hw->wlandev->netdev, "Fatal, failed to resubmit rx_urb. error=%d\n", result); } } /* Handle any USB-IN packet */ /* Note: the check of the sw_support field, the type field doesn't * have bit 12 set like the docs suggest. */ type = le16_to_cpu(usbin->type); if (HFA384x_USB_ISRXFRM(type)) { if (action == HANDLE) { if (usbin->txfrm.desc.sw_support == 0x0123) { hfa384x_usbin_txcompl(wlandev, usbin); } else { skb_put(skb, sizeof(*usbin)); hfa384x_usbin_rx(wlandev, skb); skb = NULL; } } goto exit; } if (HFA384x_USB_ISTXFRM(type)) { if (action == HANDLE) hfa384x_usbin_txcompl(wlandev, usbin); goto exit; } switch (type) { case HFA384x_USB_INFOFRM: if (action == ABORT) goto exit; if (action == HANDLE) hfa384x_usbin_info(wlandev, usbin); break; case HFA384x_USB_CMDRESP: case HFA384x_USB_WRIDRESP: case HFA384x_USB_RRIDRESP: case HFA384x_USB_WMEMRESP: case HFA384x_USB_RMEMRESP: /* ALWAYS, ALWAYS, ALWAYS handle this CTLX!!!! */ hfa384x_usbin_ctlx(hw, usbin, urb_status); break; case HFA384x_USB_BUFAVAIL: pr_debug("Received BUFAVAIL packet, frmlen=%d\n", usbin->bufavail.frmlen); break; case HFA384x_USB_ERROR: pr_debug("Received USB_ERROR packet, errortype=%d\n", usbin->usberror.errortype); break; default: pr_debug("Unrecognized USBIN packet, type=%x, status=%d\n", usbin->type, urb_status); break; } /* switch */ exit: if (skb) dev_kfree_skb(skb); } /*---------------------------------------------------------------- * hfa384x_usbin_ctlx * * We've received a URB containing a Prism2 "response" message. * This message needs to be matched up with a CTLX on the active * queue and our state updated accordingly. 
* * Arguments: * hw ptr to hfa384x_t * usbin ptr to USB IN packet * urb_status status of this Bulk-In URB * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbin_ctlx(hfa384x_t *hw, hfa384x_usbin_t *usbin, int urb_status) { hfa384x_usbctlx_t *ctlx; int run_queue = 0; unsigned long flags; retry: spin_lock_irqsave(&hw->ctlxq.lock, flags); /* There can be only one CTLX on the active queue * at any one time, and this is the CTLX that the * timers are waiting for. */ if (list_empty(&hw->ctlxq.active)) goto unlock; /* Remove the "response timeout". It's possible that * we are already too late, and that the timeout is * already running. And that's just too bad for us, * because we could lose our CTLX from the active * queue here ... */ if (del_timer(&hw->resptimer) == 0) { if (hw->resp_timer_done == 0) { spin_unlock_irqrestore(&hw->ctlxq.lock, flags); goto retry; } } else { hw->resp_timer_done = 1; } ctlx = get_active_ctlx(hw); if (urb_status != 0) { /* * Bad CTLX, so get rid of it. But we only * remove it from the active queue if we're no * longer expecting the OUT URB to complete. */ if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0) run_queue = 1; } else { const u16 intype = (usbin->type & ~cpu_to_le16(0x8000)); /* * Check that our message is what we're expecting ... */ if (ctlx->outbuf.type != intype) { netdev_warn(hw->wlandev->netdev, "Expected IN[%d], received IN[%d] - ignored.\n", le16_to_cpu(ctlx->outbuf.type), le16_to_cpu(intype)); goto unlock; } /* This URB has succeeded, so grab the data ... */ memcpy(&ctlx->inbuf, usbin, sizeof(ctlx->inbuf)); switch (ctlx->state) { case CTLX_REQ_SUBMITTED: /* * We have received our response URB before * our request has been acknowledged. Odd, * but our OUT URB is still alive... 
*/ pr_debug("Causality violation: please reboot Universe\n"); ctlx->state = CTLX_RESP_COMPLETE; break; case CTLX_REQ_COMPLETE: /* * This is the usual path: our request * has already been acknowledged, and * now we have received the reply too. */ ctlx->state = CTLX_COMPLETE; unlocked_usbctlx_complete(hw, ctlx); run_queue = 1; break; default: /* * Throw this CTLX away ... */ netdev_err(hw->wlandev->netdev, "Matched IN URB, CTLX[%d] in invalid state(%s). Discarded.\n", le16_to_cpu(ctlx->outbuf.type), ctlxstr(ctlx->state)); if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0) run_queue = 1; break; } /* switch */ } unlock: spin_unlock_irqrestore(&hw->ctlxq.lock, flags); if (run_queue) hfa384x_usbctlxq_run(hw); } /*---------------------------------------------------------------- * hfa384x_usbin_txcompl * * At this point we have the results of a previous transmit. * * Arguments: * wlandev wlan device * usbin ptr to the usb transfer buffer * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbin_txcompl(wlandevice_t *wlandev, hfa384x_usbin_t *usbin) { u16 status; status = le16_to_cpu(usbin->type); /* yeah I know it says type... */ /* Was there an error? */ if (HFA384x_TXSTATUS_ISERROR(status)) prism2sta_ev_txexc(wlandev, status); else prism2sta_ev_tx(wlandev, status); } /*---------------------------------------------------------------- * hfa384x_usbin_rx * * At this point we have a successful received a rx frame packet. 
* * Arguments: * wlandev wlan device * usbin ptr to the usb transfer buffer * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbin_rx(wlandevice_t *wlandev, struct sk_buff *skb) { hfa384x_usbin_t *usbin = (hfa384x_usbin_t *)skb->data; hfa384x_t *hw = wlandev->priv; int hdrlen; struct p80211_rxmeta *rxmeta; u16 data_len; u16 fc; /* Byte order convert once up front. */ usbin->rxfrm.desc.status = le16_to_cpu(usbin->rxfrm.desc.status); usbin->rxfrm.desc.time = le32_to_cpu(usbin->rxfrm.desc.time); /* Now handle frame based on port# */ switch (HFA384x_RXSTATUS_MACPORT_GET(usbin->rxfrm.desc.status)) { case 0: fc = le16_to_cpu(usbin->rxfrm.desc.frame_control); /* If exclude and we receive an unencrypted, drop it */ if ((wlandev->hostwep & HOSTWEP_EXCLUDEUNENCRYPTED) && !WLAN_GET_FC_ISWEP(fc)) { goto done; } data_len = le16_to_cpu(usbin->rxfrm.desc.data_len); /* How much header data do we have? 
*/ hdrlen = p80211_headerlen(fc); /* Pull off the descriptor */ skb_pull(skb, sizeof(hfa384x_rx_frame_t)); /* Now shunt the header block up against the data block * with an "overlapping" copy */ memmove(skb_push(skb, hdrlen), &usbin->rxfrm.desc.frame_control, hdrlen); skb->dev = wlandev->netdev; skb->dev->last_rx = jiffies; /* And set the frame length properly */ skb_trim(skb, data_len + hdrlen); /* The prism2 series does not return the CRC */ memset(skb_put(skb, WLAN_CRC_LEN), 0xff, WLAN_CRC_LEN); skb_reset_mac_header(skb); /* Attach the rxmeta, set some stuff */ p80211skb_rxmeta_attach(wlandev, skb); rxmeta = P80211SKB_RXMETA(skb); rxmeta->mactime = usbin->rxfrm.desc.time; rxmeta->rxrate = usbin->rxfrm.desc.rate; rxmeta->signal = usbin->rxfrm.desc.signal - hw->dbmadjust; rxmeta->noise = usbin->rxfrm.desc.silence - hw->dbmadjust; prism2sta_ev_rx(wlandev, skb); break; case 7: if (!HFA384x_RXSTATUS_ISFCSERR(usbin->rxfrm.desc.status)) { /* Copy to wlansnif skb */ hfa384x_int_rxmonitor(wlandev, &usbin->rxfrm); dev_kfree_skb(skb); } else { pr_debug("Received monitor frame: FCSerr set\n"); } break; default: netdev_warn(hw->wlandev->netdev, "Received frame on unsupported port=%d\n", HFA384x_RXSTATUS_MACPORT_GET( usbin->rxfrm.desc.status)); goto done; break; } done: return; } /*---------------------------------------------------------------- * hfa384x_int_rxmonitor * * Helper function for int_rx. Handles monitor frames. * Note that this function allocates space for the FCS and sets it * to 0xffffffff. The hfa384x doesn't give us the FCS value but the * higher layers expect it. 0xffffffff is used as a flag to indicate * the FCS is bogus. * * Arguments: * wlandev wlan device structure * rxfrm rx descriptor read from card in int_rx * * Returns: * nothing * * Side effects: * Allocates an skb and passes it up via the PF_PACKET interface. 
* Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev, hfa384x_usb_rxfrm_t *rxfrm) { hfa384x_rx_frame_t *rxdesc = &(rxfrm->desc); unsigned int hdrlen = 0; unsigned int datalen = 0; unsigned int skblen = 0; u8 *datap; u16 fc; struct sk_buff *skb; hfa384x_t *hw = wlandev->priv; /* Remember the status, time, and data_len fields are in host order */ /* Figure out how big the frame is */ fc = le16_to_cpu(rxdesc->frame_control); hdrlen = p80211_headerlen(fc); datalen = le16_to_cpu(rxdesc->data_len); /* Allocate an ind message+framesize skb */ skblen = sizeof(struct p80211_caphdr) + hdrlen + datalen + WLAN_CRC_LEN; /* sanity check the length */ if (skblen > (sizeof(struct p80211_caphdr) + WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) { pr_debug("overlen frm: len=%zd\n", skblen - sizeof(struct p80211_caphdr)); } skb = dev_alloc_skb(skblen); if (skb == NULL) { netdev_err(hw->wlandev->netdev, "alloc_skb failed trying to allocate %d bytes\n", skblen); return; } /* only prepend the prism header if in the right mode */ if ((wlandev->netdev->type == ARPHRD_IEEE80211_PRISM) && (hw->sniffhdr != 0)) { struct p80211_caphdr *caphdr; /* The NEW header format! 
*/ datap = skb_put(skb, sizeof(struct p80211_caphdr)); caphdr = (struct p80211_caphdr *)datap; caphdr->version = htonl(P80211CAPTURE_VERSION); caphdr->length = htonl(sizeof(struct p80211_caphdr)); caphdr->mactime = __cpu_to_be64(rxdesc->time) * 1000; caphdr->hosttime = __cpu_to_be64(jiffies); caphdr->phytype = htonl(4); /* dss_dot11_b */ caphdr->channel = htonl(hw->sniff_channel); caphdr->datarate = htonl(rxdesc->rate); caphdr->antenna = htonl(0); /* unknown */ caphdr->priority = htonl(0); /* unknown */ caphdr->ssi_type = htonl(3); /* rssi_raw */ caphdr->ssi_signal = htonl(rxdesc->signal); caphdr->ssi_noise = htonl(rxdesc->silence); caphdr->preamble = htonl(0); /* unknown */ caphdr->encoding = htonl(1); /* cck */ } /* Copy the 802.11 header to the skb (ctl frames may be less than a full header) */ datap = skb_put(skb, hdrlen); memcpy(datap, &(rxdesc->frame_control), hdrlen); /* If any, copy the data from the card to the skb */ if (datalen > 0) { datap = skb_put(skb, datalen); memcpy(datap, rxfrm->data, datalen); /* check for unencrypted stuff if WEP bit set. */ if (*(datap - hdrlen + 1) & 0x40) /* wep set */ if ((*(datap) == 0xaa) && (*(datap + 1) == 0xaa)) /* clear wep; it's the 802.2 header! */ *(datap - hdrlen + 1) &= 0xbf; } if (hw->sniff_fcs) { /* Set the FCS */ datap = skb_put(skb, WLAN_CRC_LEN); memset(datap, 0xff, WLAN_CRC_LEN); } /* pass it back up */ prism2sta_ev_rx(wlandev, skb); return; } /*---------------------------------------------------------------- * hfa384x_usbin_info * * At this point we have a successful received a Prism2 info frame. 
* * Arguments: * wlandev wlan device * usbin ptr to the usb transfer buffer * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbin_info(wlandevice_t *wlandev, hfa384x_usbin_t *usbin) { usbin->infofrm.info.framelen = le16_to_cpu(usbin->infofrm.info.framelen); prism2sta_ev_info(wlandev, &usbin->infofrm.info); } /*---------------------------------------------------------------- * hfa384x_usbout_callback * * Callback for URBs on the BULKOUT endpoint. * * Arguments: * urb ptr to the completed urb * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbout_callback(struct urb *urb) { wlandevice_t *wlandev = urb->context; hfa384x_usbout_t *usbout = urb->transfer_buffer; #ifdef DEBUG_USB dbprint_urb(urb); #endif if (wlandev && wlandev->netdev) { switch (urb->status) { case 0: hfa384x_usbout_tx(wlandev, usbout); break; case -EPIPE: { hfa384x_t *hw = wlandev->priv; netdev_warn(hw->wlandev->netdev, "%s tx pipe stalled: requesting reset\n", wlandev->netdev->name); if (!test_and_set_bit (WORK_TX_HALT, &hw->usb_flags)) schedule_work(&hw->usb_work); ++(wlandev->linux_stats.tx_errors); break; } case -EPROTO: case -ETIMEDOUT: case -EILSEQ: { hfa384x_t *hw = wlandev->priv; if (!test_and_set_bit (THROTTLE_TX, &hw->usb_flags) && !timer_pending(&hw->throttle)) { mod_timer(&hw->throttle, jiffies + THROTTLE_JIFFIES); } ++(wlandev->linux_stats.tx_errors); netif_stop_queue(wlandev->netdev); break; } case -ENOENT: case -ESHUTDOWN: /* Ignorable errors */ break; default: netdev_info(wlandev->netdev, "unknown urb->status=%d\n", urb->status); ++(wlandev->linux_stats.tx_errors); break; } /* switch */ } } /*---------------------------------------------------------------- * hfa384x_ctlxout_callback * * Callback for control data on the BULKOUT endpoint. 
* * Arguments: * urb ptr to the completed urb * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_ctlxout_callback(struct urb *urb) { hfa384x_t *hw = urb->context; int delete_resptimer = 0; int timer_ok = 1; int run_queue = 0; hfa384x_usbctlx_t *ctlx; unsigned long flags; pr_debug("urb->status=%d\n", urb->status); #ifdef DEBUG_USB dbprint_urb(urb); #endif if ((urb->status == -ESHUTDOWN) || (urb->status == -ENODEV) || (hw == NULL)) return; retry: spin_lock_irqsave(&hw->ctlxq.lock, flags); /* * Only one CTLX at a time on the "active" list, and * none at all if we are unplugged. However, we can * rely on the disconnect function to clean everything * up if someone unplugged the adapter. */ if (list_empty(&hw->ctlxq.active)) { spin_unlock_irqrestore(&hw->ctlxq.lock, flags); return; } /* * Having something on the "active" queue means * that we have timers to worry about ... */ if (del_timer(&hw->reqtimer) == 0) { if (hw->req_timer_done == 0) { /* * This timer was actually running while we * were trying to delete it. Let it terminate * gracefully instead. */ spin_unlock_irqrestore(&hw->ctlxq.lock, flags); goto retry; } } else { hw->req_timer_done = 1; } ctlx = get_active_ctlx(hw); if (urb->status == 0) { /* Request portion of a CTLX is successful */ switch (ctlx->state) { case CTLX_REQ_SUBMITTED: /* This OUT-ACK received before IN */ ctlx->state = CTLX_REQ_COMPLETE; break; case CTLX_RESP_COMPLETE: /* IN already received before this OUT-ACK, * so this command must now be complete. */ ctlx->state = CTLX_COMPLETE; unlocked_usbctlx_complete(hw, ctlx); run_queue = 1; break; default: /* This is NOT a valid CTLX "success" state! 
*/ netdev_err(hw->wlandev->netdev, "Illegal CTLX[%d] success state(%s, %d) in OUT URB\n", le16_to_cpu(ctlx->outbuf.type), ctlxstr(ctlx->state), urb->status); break; } /* switch */ } else { /* If the pipe has stalled then we need to reset it */ if ((urb->status == -EPIPE) && !test_and_set_bit(WORK_TX_HALT, &hw->usb_flags)) { netdev_warn(hw->wlandev->netdev, "%s tx pipe stalled: requesting reset\n", hw->wlandev->netdev->name); schedule_work(&hw->usb_work); } /* If someone cancels the OUT URB then its status * should be either -ECONNRESET or -ENOENT. */ ctlx->state = CTLX_REQ_FAILED; unlocked_usbctlx_complete(hw, ctlx); delete_resptimer = 1; run_queue = 1; } delresp: if (delete_resptimer) { timer_ok = del_timer(&hw->resptimer); if (timer_ok != 0) hw->resp_timer_done = 1; } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); if (!timer_ok && (hw->resp_timer_done == 0)) { spin_lock_irqsave(&hw->ctlxq.lock, flags); goto delresp; } if (run_queue) hfa384x_usbctlxq_run(hw); } /*---------------------------------------------------------------- * hfa384x_usbctlx_reqtimerfn * * Timer response function for CTLX request timeouts. If this * function is called, it means that the callback for the OUT * URB containing a Prism2.x XXX_Request was never called. * * Arguments: * data a ptr to the hfa384x_t * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbctlx_reqtimerfn(unsigned long data) { hfa384x_t *hw = (hfa384x_t *)data; unsigned long flags; spin_lock_irqsave(&hw->ctlxq.lock, flags); hw->req_timer_done = 1; /* Removing the hardware automatically empties * the active list ... */ if (!list_empty(&hw->ctlxq.active)) { /* * We must ensure that our URB is removed from * the system, if it hasn't already expired. 
*/ hw->ctlx_urb.transfer_flags |= URB_ASYNC_UNLINK; if (usb_unlink_urb(&hw->ctlx_urb) == -EINPROGRESS) { hfa384x_usbctlx_t *ctlx = get_active_ctlx(hw); ctlx->state = CTLX_REQ_FAILED; /* This URB was active, but has now been * cancelled. It will now have a status of * -ECONNRESET in the callback function. * * We are cancelling this CTLX, so we're * not going to need to wait for a response. * The URB's callback function will check * that this timer is truly dead. */ if (del_timer(&hw->resptimer) != 0) hw->resp_timer_done = 1; } } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); } /*---------------------------------------------------------------- * hfa384x_usbctlx_resptimerfn * * Timer response function for CTLX response timeouts. If this * function is called, it means that the callback for the IN * URB containing a Prism2.x XXX_Response was never called. * * Arguments: * data a ptr to the hfa384x_t * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbctlx_resptimerfn(unsigned long data) { hfa384x_t *hw = (hfa384x_t *)data; unsigned long flags; spin_lock_irqsave(&hw->ctlxq.lock, flags); hw->resp_timer_done = 1; /* The active list will be empty if the * adapter has been unplugged ... 
*/ if (!list_empty(&hw->ctlxq.active)) { hfa384x_usbctlx_t *ctlx = get_active_ctlx(hw); if (unlocked_usbctlx_cancel_async(hw, ctlx) == 0) { spin_unlock_irqrestore(&hw->ctlxq.lock, flags); hfa384x_usbctlxq_run(hw); return; } } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); } /*---------------------------------------------------------------- * hfa384x_usb_throttlefn * * * Arguments: * data ptr to hw * * Returns: * Nothing * * Side effects: * * Call context: * Interrupt ----------------------------------------------------------------*/ static void hfa384x_usb_throttlefn(unsigned long data) { hfa384x_t *hw = (hfa384x_t *)data; unsigned long flags; spin_lock_irqsave(&hw->ctlxq.lock, flags); /* * We need to check BOTH the RX and the TX throttle controls, * so we use the bitwise OR instead of the logical OR. */ pr_debug("flags=0x%lx\n", hw->usb_flags); if (!hw->wlandev->hwremoved && ((test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) && !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags)) | (test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) && !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags)) )) { schedule_work(&hw->usb_work); } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); } /*---------------------------------------------------------------- * hfa384x_usbctlx_submit * * Called from the doxxx functions to submit a CTLX to the queue * * Arguments: * hw ptr to the hw struct * ctlx ctlx structure to enqueue * * Returns: * -ENODEV if the adapter is unplugged * 0 * * Side effects: * * Call context: * process or interrupt ----------------------------------------------------------------*/ static int hfa384x_usbctlx_submit(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx) { unsigned long flags; spin_lock_irqsave(&hw->ctlxq.lock, flags); if (hw->wlandev->hwremoved) { spin_unlock_irqrestore(&hw->ctlxq.lock, flags); return -ENODEV; } ctlx->state = CTLX_PENDING; list_add_tail(&ctlx->list, &hw->ctlxq.pending); spin_unlock_irqrestore(&hw->ctlxq.lock, flags); hfa384x_usbctlxq_run(hw); return 0; } 
/*---------------------------------------------------------------- * hfa384x_usbout_tx * * At this point we have finished a send of a frame. Mark the URB * as available and call ev_alloc to notify higher layers we're * ready for more. * * Arguments: * wlandev wlan device * usbout ptr to the usb transfer buffer * * Returns: * nothing * * Side effects: * * Call context: * interrupt ----------------------------------------------------------------*/ static void hfa384x_usbout_tx(wlandevice_t *wlandev, hfa384x_usbout_t *usbout) { prism2sta_ev_alloc(wlandev); } /*---------------------------------------------------------------- * hfa384x_isgood_pdrcore * * Quick check of PDR codes. * * Arguments: * pdrcode PDR code number (host order) * * Returns: * zero not good. * one is good. * * Side effects: * * Call context: ----------------------------------------------------------------*/ static int hfa384x_isgood_pdrcode(u16 pdrcode) { switch (pdrcode) { case HFA384x_PDR_END_OF_PDA: case HFA384x_PDR_PCB_PARTNUM: case HFA384x_PDR_PDAVER: case HFA384x_PDR_NIC_SERIAL: case HFA384x_PDR_MKK_MEASUREMENTS: case HFA384x_PDR_NIC_RAMSIZE: case HFA384x_PDR_MFISUPRANGE: case HFA384x_PDR_CFISUPRANGE: case HFA384x_PDR_NICID: case HFA384x_PDR_MAC_ADDRESS: case HFA384x_PDR_REGDOMAIN: case HFA384x_PDR_ALLOWED_CHANNEL: case HFA384x_PDR_DEFAULT_CHANNEL: case HFA384x_PDR_TEMPTYPE: case HFA384x_PDR_IFR_SETTING: case HFA384x_PDR_RFR_SETTING: case HFA384x_PDR_HFA3861_BASELINE: case HFA384x_PDR_HFA3861_SHADOW: case HFA384x_PDR_HFA3861_IFRF: case HFA384x_PDR_HFA3861_CHCALSP: case HFA384x_PDR_HFA3861_CHCALI: case HFA384x_PDR_3842_NIC_CONFIG: case HFA384x_PDR_USB_ID: case HFA384x_PDR_PCI_ID: case HFA384x_PDR_PCI_IFCONF: case HFA384x_PDR_PCI_PMCONF: case HFA384x_PDR_RFENRGY: case HFA384x_PDR_HFA3861_MANF_TESTSP: case HFA384x_PDR_HFA3861_MANF_TESTI: /* code is OK */ return 1; break; default: if (pdrcode < 0x1000) { /* code is OK, but we don't know exactly what it is */ pr_debug("Encountered unknown 
PDR#=0x%04x, assuming it's ok.\n", pdrcode); return 1; } else { /* bad code */ pr_debug("Encountered unknown PDR#=0x%04x, (>=0x1000), assuming it's bad.\n", pdrcode); return 0; } break; } return 0; /* avoid compiler warnings */ }
gpl-2.0
samnazarko/vero-linux
drivers/usb/gadget/udc-core.c
57
14785
/** * udc.c - Core UDC Framework * * Copyright (C) 2010 Texas Instruments * Author: Felipe Balbi <balbi@ti.com> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 of * the License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/list.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/workqueue.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> /** * struct usb_udc - describes one usb device controller * @driver - the gadget driver pointer. For use by the class code * @dev - the child device to the actual controller * @gadget - the gadget. For use by the class code * @list - for use by the udc class driver * * This represents the internal data structure which is used by the UDC-class * to hold information about udc driver and gadget together. */ struct usb_udc { struct usb_gadget_driver *driver; struct usb_gadget *gadget; struct device dev; struct list_head list; }; static struct class *udc_class; static LIST_HEAD(udc_list); static DEFINE_MUTEX(udc_lock); /* ------------------------------------------------------------------------- */ #ifdef CONFIG_HAS_DMA int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { if (req->length == 0) return 0; if (req->num_sgs) { int mapped; mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, is_in ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mapped == 0) { dev_err(&gadget->dev, "failed to map SGs\n"); return -EFAULT; } req->num_mapped_sgs = mapped; } else { req->dma = dma_map_single(&gadget->dev, req->buf, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (dma_mapping_error(&gadget->dev, req->dma)) { dev_err(&gadget->dev, "failed to map buffer\n"); return -EFAULT; } } return 0; } EXPORT_SYMBOL_GPL(usb_gadget_map_request); void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { if (req->length == 0) return; if (req->num_mapped_sgs) { dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->num_mapped_sgs = 0; } else { dma_unmap_single(&gadget->dev, req->dma, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); } } EXPORT_SYMBOL_GPL(usb_gadget_unmap_request); #endif /* CONFIG_HAS_DMA */ /* ------------------------------------------------------------------------- */ static void usb_gadget_state_work(struct work_struct *work) { struct usb_gadget *gadget = work_to_gadget(work); sysfs_notify(&gadget->dev.kobj, NULL, "state"); } void usb_gadget_set_state(struct usb_gadget *gadget, enum usb_device_state state) { gadget->state = state; schedule_work(&gadget->work); } EXPORT_SYMBOL_GPL(usb_gadget_set_state); /* ------------------------------------------------------------------------- */ /** * usb_gadget_udc_start - tells usb device controller to start up * @gadget: The gadget we want to get started * @driver: The driver we want to bind to @gadget * * This call is issued by the UDC Class driver when it's about * to register a gadget driver to the device controller, before * calling gadget driver's bind() method. * * It allows the controller to be powered off until strictly * necessary to have it powered on. * * Returns zero on success, else negative errno. 
*/ static inline int usb_gadget_udc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { return gadget->ops->udc_start(gadget, driver); } /** * usb_gadget_udc_stop - tells usb device controller we don't need it anymore * @gadget: The device we want to stop activity * @driver: The driver to unbind from @gadget * * This call is issued by the UDC Class driver after calling * gadget driver's unbind() method. * * The details are implementation specific, but it can go as * far as powering off UDC completely and disable its data * line pullups. */ static inline void usb_gadget_udc_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { gadget->ops->udc_stop(gadget, driver); } /** * usb_udc_release - release the usb_udc struct * @dev: the dev member within usb_udc * * This is called by driver's core in order to free memory once the last * reference is released. */ static void usb_udc_release(struct device *dev) { struct usb_udc *udc; udc = container_of(dev, struct usb_udc, dev); dev_dbg(dev, "releasing '%s'\n", dev_name(dev)); kfree(udc); } static const struct attribute_group *usb_udc_attr_groups[]; static void usb_udc_nop_release(struct device *dev) { dev_vdbg(dev, "%s\n", __func__); } /** * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list * @parent: the parent device to this udc. Usually the controller driver's * device. * @gadget: the gadget to be added to the list. * @release: a gadget release function. * * Returns zero on success, negative errno otherwise. 
*/ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)) { struct usb_udc *udc; int ret = -ENOMEM; udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) goto err1; dev_set_name(&gadget->dev, "gadget"); INIT_WORK(&gadget->work, usb_gadget_state_work); gadget->dev.parent = parent; #ifdef CONFIG_HAS_DMA dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask); gadget->dev.dma_parms = parent->dma_parms; gadget->dev.dma_mask = parent->dma_mask; #endif if (release) gadget->dev.release = release; else gadget->dev.release = usb_udc_nop_release; ret = device_register(&gadget->dev); if (ret) goto err2; device_initialize(&udc->dev); udc->dev.release = usb_udc_release; udc->dev.class = udc_class; udc->dev.groups = usb_udc_attr_groups; udc->dev.parent = parent; ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); if (ret) goto err3; udc->gadget = gadget; mutex_lock(&udc_lock); list_add_tail(&udc->list, &udc_list); ret = device_add(&udc->dev); if (ret) goto err4; usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); mutex_unlock(&udc_lock); return 0; err4: list_del(&udc->list); mutex_unlock(&udc_lock); err3: put_device(&udc->dev); err2: put_device(&gadget->dev); kfree(udc); err1: return ret; } EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release); /** * usb_add_gadget_udc - adds a new gadget to the udc class driver list * @parent: the parent device to this udc. Usually the controller * driver's device. * @gadget: the gadget to be added to the list * * Returns zero on success, negative errno otherwise. 
 */
int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
{
	/* convenience wrapper: register with the default (no-op) release */
	return usb_add_gadget_udc_release(parent, gadget, NULL);
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc);

/*
 * usb_gadget_remove_driver - tear a gadget driver off its controller
 * @udc: the controller currently bound to a driver
 *
 * Caller must hold udc_lock (all callers in this file do). Performs the
 * disconnect -> driver disconnect -> unbind -> udc_stop sequence and then
 * clears the driver back-pointers.
 */
static void usb_gadget_remove_driver(struct usb_udc *udc)
{
	dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
			udc->gadget->name);

	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);

	usb_gadget_disconnect(udc->gadget);
	udc->driver->disconnect(udc->gadget);
	udc->driver->unbind(udc->gadget);
	usb_gadget_udc_stop(udc->gadget, NULL);

	udc->driver = NULL;
	udc->dev.driver = NULL;
	udc->gadget->dev.driver = NULL;
}

/**
 * usb_del_gadget_udc - deletes @udc from udc_list
 * @gadget: the gadget to be removed.
 *
 * This will call usb_gadget_unregister_driver() if
 * the @udc is still busy.
 */
void usb_del_gadget_udc(struct usb_gadget *gadget)
{
	struct usb_udc		*udc = NULL;

	mutex_lock(&udc_lock);
	list_for_each_entry(udc, &udc_list, list)
		if (udc->gadget == gadget)
			goto found;

	dev_err(gadget->dev.parent, "gadget not registered.\n");
	mutex_unlock(&udc_lock);

	return;

found:
	dev_vdbg(gadget->dev.parent, "unregistering gadget\n");

	list_del(&udc->list);
	mutex_unlock(&udc_lock);

	/* still bound? unwind the driver first, like unregister would */
	if (udc->driver)
		usb_gadget_remove_driver(udc);

	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
	/* make sure no state-notify work is still in flight before teardown */
	flush_work(&gadget->work);
	device_unregister(&udc->dev);
	device_unregister(&gadget->dev);
}
EXPORT_SYMBOL_GPL(usb_del_gadget_udc);

/* ------------------------------------------------------------------------- */

/*
 * udc_bind_to_driver - bind a gadget driver to @udc and start the controller
 *
 * Caller must hold udc_lock. On any failure the driver back-pointers are
 * cleared again and the negative errno from bind()/udc_start() is returned.
 */
static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
{
	int ret;

	dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
			driver->function);

	udc->driver = driver;
	udc->dev.driver = &driver->driver;
	udc->gadget->dev.driver = &driver->driver;

	ret = driver->bind(udc->gadget, driver);
	if (ret)
		goto err1;
	ret = usb_gadget_udc_start(udc->gadget, driver);
	if (ret) {
		driver->unbind(udc->gadget);
		goto err1;
	}
	usb_gadget_connect(udc->gadget);

	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
	return 0;
err1:
	/*
	 * NOTE(review): -EISNAM seems to be used here as a "fail silently"
	 * sentinel returned by some gadget drivers' bind() -- confirm against
	 * the gadget drivers in this tree; upstream udc-core has no such check.
	 */
	if (ret != -EISNAM)
		dev_err(&udc->dev, "failed to start %s: %d\n",
			udc->driver->function, ret);
	udc->driver = NULL;
	udc->dev.driver = NULL;
	udc->gadget->dev.driver = NULL;
	return ret;
}

/*
 * udc_attach_driver - bind @driver to the UDC whose device name is @name
 *
 * Returns -ENODEV when no UDC of that name exists, -EBUSY when it is
 * already bound, otherwise the result of udc_bind_to_driver().
 */
int udc_attach_driver(const char *name, struct usb_gadget_driver *driver)
{
	struct usb_udc *udc = NULL;
	int ret = -ENODEV;

	mutex_lock(&udc_lock);
	list_for_each_entry(udc, &udc_list, list) {
		/* strcmp() == 0 doubles as the "found" flag here */
		ret = strcmp(name, dev_name(&udc->dev));
		if (!ret)
			break;
	}
	if (ret) {
		ret = -ENODEV;
		goto out;
	}
	if (udc->driver) {
		ret = -EBUSY;
		goto out;
	}
	ret = udc_bind_to_driver(udc, driver);
out:
	mutex_unlock(&udc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(udc_attach_driver);

/*
 * usb_gadget_probe_driver - bind @driver to the first available controller
 *
 * Returns -EINVAL for a malformed driver, -ENODEV when every registered
 * UDC is already bound, otherwise the result of udc_bind_to_driver().
 */
int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
{
	struct usb_udc		*udc = NULL;
	int			ret;

	if (!driver || !driver->bind || !driver->setup)
		return -EINVAL;

	mutex_lock(&udc_lock);
	list_for_each_entry(udc, &udc_list, list) {
		/* For now we take the first one */
		if (!udc->driver)
			goto found;
	}

	pr_debug("couldn't find an available UDC\n");
	mutex_unlock(&udc_lock);
	return -ENODEV;
found:
	ret = udc_bind_to_driver(udc, driver);
	mutex_unlock(&udc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);

/*
 * usb_gadget_unregister_driver - detach @driver from whichever UDC runs it
 *
 * Returns 0 when the driver was found and removed, -ENODEV when it was not
 * bound anywhere, -EINVAL for a malformed driver.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct usb_udc		*udc = NULL;
	int			ret = -ENODEV;

	if (!driver || !driver->unbind)
		return -EINVAL;

	mutex_lock(&udc_lock);
	list_for_each_entry(udc, &udc_list, list)
		if (udc->driver == driver) {
			usb_gadget_remove_driver(udc);
			ret = 0;
			break;
		}
	mutex_unlock(&udc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);

/* ------------------------------------------------------------------------- */

/* sysfs "srp": writing "1" asks the gadget to signal remote wakeup / SRP */
static ssize_t usb_udc_srp_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t n)
{
	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);

	if (sysfs_streq(buf, "1"))
		usb_gadget_wakeup(udc->gadget);

	return n;
}
static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);

/* sysfs "soft_connect": "connect"/"disconnect" toggle the D+ pullup */
static ssize_t usb_udc_softconn_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t n)
{
	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);

	if (!udc->driver) {
		dev_err(dev, "soft-connect without a gadget driver\n");
		return -EOPNOTSUPP;
	}

	if (sysfs_streq(buf, "connect")) {
		usb_gadget_udc_start(udc->gadget, udc->driver);
		usb_gadget_connect(udc->gadget);
	} else if (sysfs_streq(buf, "disconnect")) {
		usb_gadget_disconnect(udc->gadget);
		usb_gadget_udc_stop(udc->gadget, udc->driver);
	} else {
		dev_err(dev, "unsupported command '%s'\n", buf);
		return -EINVAL;
	}

	return n;
}
static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);

/* sysfs "state": current enum usb_device_state as a human-readable string */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
	struct usb_gadget	*gadget = udc->gadget;

	return sprintf(buf, "%s\n", usb_state_string(gadget->state));
}
static DEVICE_ATTR_RO(state);

/* generates a read-only attribute showing a usb_speed field of the gadget */
#define USB_UDC_SPEED_ATTR(name, param)					\
ssize_t name##_show(struct device *dev,					\
		struct device_attribute *attr, char *buf)		\
{									\
	struct usb_udc *udc = container_of(dev, struct usb_udc, dev);	\
	return snprintf(buf, PAGE_SIZE, "%s\n",				\
			usb_speed_string(udc->gadget->param));		\
}									\
static DEVICE_ATTR_RO(name)

static USB_UDC_SPEED_ATTR(current_speed, speed);
static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);

/* generates a read-only attribute showing an integer gadget flag */
#define USB_UDC_ATTR(name)					\
ssize_t name##_show(struct device *dev,				\
		struct device_attribute *attr, char *buf)	\
{								\
	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev); \
	struct usb_gadget	*gadget = udc->gadget;		\
								\
	return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name);	\
}								\
static DEVICE_ATTR_RO(name)

static USB_UDC_ATTR(is_otg);
static USB_UDC_ATTR(is_a_peripheral);
static USB_UDC_ATTR(b_hnp_enable);
static USB_UDC_ATTR(a_hnp_support);
static USB_UDC_ATTR(a_alt_hnp_support);

static struct attribute *usb_udc_attrs[] = {
	&dev_attr_srp.attr,
	&dev_attr_soft_connect.attr,
	&dev_attr_state.attr,
	&dev_attr_current_speed.attr,
	&dev_attr_maximum_speed.attr,

	&dev_attr_is_otg.attr,
	&dev_attr_is_a_peripheral.attr,
	&dev_attr_b_hnp_enable.attr,
	&dev_attr_a_hnp_support.attr,
	&dev_attr_a_alt_hnp_support.attr,
	NULL,
};

static const struct attribute_group usb_udc_attr_group = {
	.attrs = usb_udc_attrs,
};

static const struct attribute_group *usb_udc_attr_groups[] = {
	&usb_udc_attr_group,
	NULL,
};

/*
 * uevent callback for the udc class: exports USB_UDC_NAME (always) and
 * USB_UDC_DRIVER (when a gadget driver is bound) to userspace hotplug.
 */
static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct usb_udc		*udc = container_of(dev, struct usb_udc, dev);
	int			ret;

	ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
	if (ret) {
		dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
		return ret;
	}

	if (udc->driver) {
		ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
				udc->driver->function);
		if (ret) {
			dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
			return ret;
		}
	}

	return 0;
}

/* creates the "udc" class; runs at subsys_initcall time, before UDC drivers */
static int __init usb_udc_init(void)
{
	udc_class = class_create(THIS_MODULE, "udc");
	if (IS_ERR(udc_class)) {
		pr_err("failed to create udc class --> %ld\n",
				PTR_ERR(udc_class));
		return PTR_ERR(udc_class);
	}

	udc_class->dev_uevent = usb_udc_uevent;
	return 0;
}
subsys_initcall(usb_udc_init);

static void __exit usb_udc_exit(void)
{
	class_destroy(udc_class);
}
module_exit(usb_udc_exit);

MODULE_DESCRIPTION("UDC Framework");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
gpl-2.0
kbc-developers/android_kernel_htc_msm8960
drivers/usb/otg/msm_otg_htc.c
57
117994
/* Copyright (c) 2009-2013, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/dma-mapping.h> #include <linux/usb.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> #include <linux/usb/gadget.h> #include <linux/usb/hcd.h> #include <linux/usb/quirks.h> #include <linux/usb/msm_hsusb.h> #include <linux/usb/msm_hsusb_hw.h> #include <linux/usb/htc_info.h> #include <linux/regulator/consumer.h> #include <linux/mfd/pm8xxx/pm8921-charger.h> #include <linux/mfd/pm8xxx/misc.h> #include <linux/power_supply.h> #include <linux/fs.h> #include <linux/string.h> #include <asm/uaccess.h> #include <mach/clk.h> #include <mach/msm_xo.h> #include <mach/msm_bus.h> #include <mach/rpm-regulator.h> #include <mach/cable_detect.h> #include <mach/board.h> #include <mach/board_htc.h> #define MSM_USB_BASE (motg->regs) #define DRIVER_NAME "msm_otg" extern int htc_battery_set_max_input_current(int target_ma); static int htc_otg_vbus; int USB_disabled; static struct msm_otg *the_msm_otg; static int re_enable_host; static int stop_usb_host; enum { NOT_ON_AUTOBOT, DOCK_ON_AUTOBOT, HTC_MODE_RUNNING }; enum { DEFAULT_STATE, TRY_STOP_HOST_STATE, STOP_HOST_STATE, 
TRY_ENABLE_HOST_STATE }; static DEFINE_MUTEX(smwork_sem); static DEFINE_MUTEX(notify_sem); static void send_usb_connect_notify(struct work_struct *w) { static struct t_usb_status_notifier *notifier; struct msm_otg *motg = container_of(w, struct msm_otg, notifier_work); struct usb_otg *otg; if (!motg) return; otg = motg->phy.otg; motg->connect_type_ready = 1; USBH_INFO("send connect type %d\n", motg->connect_type); mutex_lock(&notify_sem); list_for_each_entry(notifier, &g_lh_usb_notifier_list, notifier_link) { if (notifier->func != NULL) { /* Notify other drivers about connect type. */ /* use slow charging for unknown type*/ #if 0 if (motg->connect_type == CONNECT_TYPE_UNKNOWN) notifier->func(CONNECT_TYPE_USB); else #endif notifier->func(motg->connect_type); } } mutex_unlock(&notify_sem); if ((board_mfg_mode() == 5 || USB_disabled )&& motg->connect_type == CONNECT_TYPE_USB) { pm_runtime_put_noidle(otg->phy->dev); pm_runtime_suspend(otg->phy->dev); } if (motg->chg_type == USB_CDP_CHARGER) htc_battery_set_max_input_current(900); } int htc_usb_register_notifier(struct t_usb_status_notifier *notifier) { if (!notifier || !notifier->name || !notifier->func) return -EINVAL; mutex_lock(&notify_sem); list_add(&notifier->notifier_link, &g_lh_usb_notifier_list); mutex_unlock(&notify_sem); return 0; } int usb_is_connect_type_ready(void) { if (!the_msm_otg) return 0; return the_msm_otg->connect_type_ready; } EXPORT_SYMBOL(usb_is_connect_type_ready); int usb_get_connect_type(void) { if (!the_msm_otg) return 0; #ifdef CONFIG_MACH_VERDI_LTE if (the_msm_otg->connect_type == CONNECT_TYPE_USB_9V_AC) return CONNECT_TYPE_9V_AC; #endif return the_msm_otg->connect_type; } EXPORT_SYMBOL(usb_get_connect_type); static bool is_msm_otg_support_power_collapse(struct msm_otg *motg) { bool ret = true; if (!motg->pdata->ldo_power_collapse) return false; if (get_kernel_flag() & KERNEL_FLAG_SERIAL_HSL_ENABLE && (motg->pdata->ldo_power_collapse & POWER_COLLAPSE_LDO3V3)) { /* Not to turn off the L3 
due to it owns the switch power */ motg->pdata->ldo_power_collapse &= ~POWER_COLLAPSE_LDO3V3; USBH_INFO("%s: kernel_flag_serial_hsl_enable." " POWER_COLLAPSE_LDO3V3 disabled\n", __func__); } if (motg->pdata->ldo_power_collapse & POWER_COLLAPSE_LDO3V3) USBH_INFO("%s: POWER_COLLAPSE_LDO3V3\n", __func__); if (motg->pdata->ldo_power_collapse & POWER_COLLAPSE_LDO1V8) USBH_INFO("%s: POWER_COLLAPSE_LDO1V8\n", __func__); if (board_mfg_mode() == 8) { ret = false; USBH_DEBUG("%s:No. under mfgkernel\n", __func__); } return ret; } #define ID_TIMER_FREQ (jiffies + msecs_to_jiffies(500)) #define ULPI_IO_TIMEOUT_USEC (10 * 1000) #define USB_PHY_3P3_VOL_MIN 3050000 /* uV */ #define USB_PHY_3P3_VOL_MAX 3300000 /* uV */ #define USB_PHY_3P3_HPM_LOAD 50000 /* uA */ #define USB_PHY_3P3_LPM_LOAD 4000 /* uA */ #define USB_PHY_1P8_VOL_MIN 1800000 /* uV */ #define USB_PHY_1P8_VOL_MAX 1800000 /* uV */ #define USB_PHY_1P8_HPM_LOAD 50000 /* uA */ #define USB_PHY_1P8_LPM_LOAD 4000 /* uA */ #define USB_PHY_VDD_DIG_VOL_NONE 0 /*uV */ #define USB_PHY_VDD_DIG_VOL_MIN 1045000 /* uV */ #define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */ static DECLARE_COMPLETION(pmic_vbus_init); static struct msm_otg *the_msm_otg; static bool debug_aca_enabled; static bool debug_bus_voting_enabled; static struct regulator *hsusb_3p3; static struct regulator *hsusb_1p8; static struct regulator *hsusb_vddcx; static struct regulator *mhl_usb_hs_switch; static struct power_supply *psy; static bool aca_id_turned_on; static inline bool aca_enabled(void) { #ifdef CONFIG_USB_MSM_ACA return true; #else return debug_aca_enabled; #endif } static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = { { /* VDD_CX CORNER Voting */ [VDD_NONE] = RPM_VREG_CORNER_NONE, [VDD_MIN] = RPM_VREG_CORNER_NOMINAL, [VDD_MAX] = RPM_VREG_CORNER_HIGH, }, { /* VDD_CX Voltage Voting */ [VDD_NONE] = USB_PHY_VDD_DIG_VOL_NONE, [VDD_MIN] = USB_PHY_VDD_DIG_VOL_MIN, [VDD_MAX] = USB_PHY_VDD_DIG_VOL_MAX, }, }; static int msm_hsusb_ldo_init(struct msm_otg *motg, int 
init) { int rc = 0; if (init) { hsusb_3p3 = devm_regulator_get(motg->phy.dev, "HSUSB_3p3"); if (IS_ERR(hsusb_3p3)) { USBH_ERR("unable to get hsusb 3p3\n"); return PTR_ERR(hsusb_3p3); } rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN, USB_PHY_3P3_VOL_MAX); if (rc) { USBH_ERR("unable to set voltage level for" "hsusb 3p3\n"); return rc; } hsusb_1p8 = devm_regulator_get(motg->phy.dev, "HSUSB_1p8"); if (IS_ERR(hsusb_1p8)) { USBH_ERR("unable to get hsusb 1p8\n"); rc = PTR_ERR(hsusb_1p8); goto put_3p3_lpm; } rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN, USB_PHY_1P8_VOL_MAX); if (rc) { dev_err(motg->otg.dev, "unable to set voltage level for" "hsusb 1p8\n"); goto put_1p8; } return 0; } put_1p8: regulator_set_voltage(hsusb_1p8, 0, USB_PHY_1P8_VOL_MAX); put_3p3_lpm: regulator_set_voltage(hsusb_3p3, 0, USB_PHY_3P3_VOL_MAX); return rc; } static int msm_hsusb_config_vddcx(int high) { struct msm_otg *motg = the_msm_otg; enum usb_vdd_type vdd_type = motg->vdd_type; int max_vol = vdd_val[vdd_type][VDD_MAX]; int min_vol; int ret; min_vol = vdd_val[vdd_type][!!high]; ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol); if (ret) { USBH_ERR("%s: unable to set the voltage for regulator " "HSUSB_VDDCX\n", __func__); return ret; } USBH_DEBUG("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol); return ret; } static int msm_hsusb_ldo_enable(struct msm_otg *motg, int on) { int ret = 0; if (IS_ERR(hsusb_1p8)) { USBH_ERR("%s: HSUSB_1p8 is not initialized\n", __func__); return -ENODEV; } if (IS_ERR(hsusb_3p3)) { USBH_ERR("%s: HSUSB_3p3 is not initialized\n", __func__); return -ENODEV; } if (on) { ret = regulator_set_optimum_mode(hsusb_1p8, USB_PHY_1P8_HPM_LOAD); if (ret < 0) { USBH_ERR("%s: Unable to set HPM of the regulator:" "HSUSB_1p8\n", __func__); return ret; } else USBH_INFO("%s: hsusb_1p8: %duA\n", __func__, USB_PHY_1P8_HPM_LOAD); ret = regulator_enable(hsusb_1p8); if (ret) { USBH_ERR("%s: unable to enable the hsusb 1p8\n", __func__); 
regulator_set_optimum_mode(hsusb_1p8, 0); return ret; } ret = regulator_set_optimum_mode(hsusb_3p3, USB_PHY_3P3_HPM_LOAD); if (ret < 0) { USBH_ERR("%s: Unable to set HPM of the regulator:" "HSUSB_3p3\n", __func__); regulator_set_optimum_mode(hsusb_1p8, 0); regulator_disable(hsusb_1p8); return ret; } else USBH_INFO("%s: hsusb_3p3: %duA\n", __func__, USB_PHY_3P3_HPM_LOAD); ret = regulator_enable(hsusb_3p3); if (ret) { USBH_ERR("%s: unable to enable the hsusb 3p3\n", __func__); regulator_set_optimum_mode(hsusb_3p3, 0); regulator_set_optimum_mode(hsusb_1p8, 0); regulator_disable(hsusb_1p8); return ret; } } else { if (motg->pdata->ldo_power_collapse & POWER_COLLAPSE_LDO1V8) { ret = regulator_disable(hsusb_1p8); if (ret) { USBH_ERR("%s: unable to disable the hsusb 1p8\n", __func__); return ret; } ret = regulator_set_optimum_mode(hsusb_1p8, 0); if (ret < 0) USBH_ERR("%s: Unable to set LPM of the regulator:" "HSUSB_1p8\n", __func__); else USBH_INFO("%s: hsusb_1p8: 0uA\n", __func__); } if (motg->pdata->ldo_power_collapse & POWER_COLLAPSE_LDO3V3) { ret = regulator_disable(hsusb_3p3); if (ret) { USBH_ERR("%s: unable to disable the hsusb 3p3\n", __func__); return ret; } ret = regulator_set_optimum_mode(hsusb_3p3, 0); if (ret < 0) USBH_ERR("%s: Unable to set LPM of the regulator:" "HSUSB_3p3\n", __func__); else USBH_INFO("%s: hsusb_3p3: 0uA\n", __func__); } } USBH_DEBUG("reg (%s)\n", on ? "HPM" : "LPM"); return ret < 0 ? 
ret : 0; } static const char *state_string(enum usb_otg_state state) { switch (state) { case OTG_STATE_A_IDLE: return "a_idle"; case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; case OTG_STATE_A_HOST: return "a_host"; case OTG_STATE_A_SUSPEND: return "a_suspend"; case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; case OTG_STATE_B_IDLE: return "b_idle"; case OTG_STATE_B_SRP_INIT: return "b_srp_init"; case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; case OTG_STATE_B_HOST: return "b_host"; default: return "UNDEFINED"; } } static const char *chg_state_string(enum usb_chg_state state) { switch (state) { case USB_CHG_STATE_UNDEFINED: return "CHG_STATE_UNDEFINED"; case USB_CHG_STATE_WAIT_FOR_DCD: return "CHG_WAIT_FOR_DCD"; case USB_CHG_STATE_DCD_DONE: return "CHG_DCD_DONE"; case USB_CHG_STATE_PRIMARY_DONE: return "CHG_PRIMARY_DONE"; case USB_CHG_STATE_SECONDARY_DONE: return "CHG_SECONDARY_DONE"; case USB_CHG_STATE_DETECTED: return "CHG_DETECTED"; default: return "UNDEFINED"; } } static void msm_hsusb_mhl_switch_enable(struct msm_otg *motg, bool on) { struct msm_otg_platform_data *pdata = motg->pdata; if (!pdata->mhl_enable) return; if (!mhl_usb_hs_switch) { pr_err("%s: mhl_usb_hs_switch is NULL.\n", __func__); return; } if (on) { if (regulator_enable(mhl_usb_hs_switch)) pr_err("unable to enable mhl_usb_hs_switch\n"); } else { regulator_disable(mhl_usb_hs_switch); } } static int ulpi_read(struct usb_phy *phy, u32 reg) { struct msm_otg *motg = container_of(phy, struct msm_otg, phy); int cnt = 0; if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) udelay(200); /* initiate read operation */ writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), USB_ULPI_VIEWPORT); /* wait for completion */ while (cnt < ULPI_IO_TIMEOUT_USEC) { if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN)) break; 
udelay(1); cnt++; } if (cnt >= ULPI_IO_TIMEOUT_USEC) { USBH_WARNING("ulpi_read: timeout %08x reg: 0x%x\n", readl(USB_ULPI_VIEWPORT), reg); return -ETIMEDOUT; } return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT)); } static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg) { struct msm_otg *motg = container_of(phy, struct msm_otg, phy); int cnt = 0; if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) udelay(200); /* initiate write operation */ writel(ULPI_RUN | ULPI_WRITE | ULPI_ADDR(reg) | ULPI_DATA(val), USB_ULPI_VIEWPORT); /* wait for completion */ while (cnt < ULPI_IO_TIMEOUT_USEC) { if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN)) break; udelay(1); cnt++; } if (cnt >= ULPI_IO_TIMEOUT_USEC) { USBH_WARNING("ulpi_write: timeout reg: 0x%x ,val: 0x%x\n", reg, val); return -ETIMEDOUT; } return 0; } ssize_t otg_show_usb_phy_setting(char *buf) { struct msm_otg *motg = the_msm_otg; unsigned length = 0; int i; for (i = 0; i <= 0x14; i++) length += sprintf(buf + length, "0x%x = 0x%x\n", i, ulpi_read(&motg->phy, i)); for (i = 0x30; i <= 0x37; i++) length += sprintf(buf + length, "0x%x = 0x%x\n", i, ulpi_read(&motg->phy, i)); if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY) { for (i = 0x80; i <= 0x83; i++) length += sprintf(buf + length, "0x%x = 0x%x\n", i, ulpi_read(&motg->phy, i)); } return length; } ssize_t otg_store_usb_phy_setting(const char *buf, size_t count) { struct msm_otg *motg = the_msm_otg; char *token[10]; unsigned long reg; unsigned long value; int i; USBH_INFO("%s\n", buf); for (i = 0; i < 2; i++) token[i] = strsep((char **)&buf, " "); i = strict_strtoul(token[0], 16, (unsigned long *)&reg); if (i < 0) { USBH_ERR("%s: reg %d\n", __func__, i); return 0; } i = strict_strtoul(token[1], 16, (unsigned long *)&value); if (i < 0) { USBH_ERR("%s: value %d\n", __func__, i); return 0; } USBH_INFO("Set 0x%02lx = 0x%02lx\n", reg, value); ulpi_write(&motg->phy, value, reg); return count; } static struct usb_phy_io_ops msm_otg_io_ops = { .read = ulpi_read, .write = 
ulpi_write, }; static void ulpi_init(struct msm_otg *motg) { struct msm_otg_platform_data *pdata = motg->pdata; int *seq = pdata->phy_init_seq; if (!seq) return; while (seq[0] >= 0) { USBH_INFO("ulpi: write 0x%02x to 0x%02x\n", seq[0], seq[1]); ulpi_write(&motg->phy, seq[0], seq[1]); seq += 2; } } static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert) { int ret; if (IS_ERR(motg->clk)) return 0; if (assert) { ret = clk_reset(motg->clk, CLK_RESET_ASSERT); if (ret) USBH_ERR("usb hs_clk assert failed\n"); } else { ret = clk_reset(motg->clk, CLK_RESET_DEASSERT); if (ret) USBH_ERR("usb hs_clk deassert failed\n"); } return ret; } static int msm_otg_phy_clk_reset(struct msm_otg *motg) { int ret; if (IS_ERR(motg->phy_reset_clk)) return 0; ret = clk_reset(motg->phy_reset_clk, CLK_RESET_ASSERT); if (ret) { USBH_ERR("usb phy clk assert failed\n"); return ret; } usleep_range(10000, 12000); ret = clk_reset(motg->phy_reset_clk, CLK_RESET_DEASSERT); if (ret) USBH_ERR("usb phy clk deassert failed\n"); return ret; } static int msm_otg_phy_reset(struct msm_otg *motg) { u32 val; int ret; int retries; ret = msm_otg_link_clk_reset(motg, 1); if (ret) return ret; ret = msm_otg_phy_clk_reset(motg); if (ret) return ret; ret = msm_otg_link_clk_reset(motg, 0); if (ret) return ret; val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK; writel(val | PORTSC_PTS_ULPI, USB_PORTSC); for (retries = 3; retries > 0; retries--) { ret = ulpi_write(&motg->phy, ULPI_FUNC_CTRL_SUSPENDM, ULPI_CLR(ULPI_FUNC_CTRL)); if (!ret) break; ret = msm_otg_phy_clk_reset(motg); if (ret) return ret; } if (!retries) return -ETIMEDOUT; /* This reset calibrates the phy, if the above write succeeded */ ret = msm_otg_phy_clk_reset(motg); if (ret) return ret; for (retries = 3; retries > 0; retries--) { ret = ulpi_read(&motg->phy, ULPI_DEBUG); if (ret != -ETIMEDOUT) break; ret = msm_otg_phy_clk_reset(motg); if (ret) return ret; } if (!retries) return -ETIMEDOUT; USBH_INFO("phy_reset: success\n"); return 0; } #define 
LINK_RESET_TIMEOUT_USEC (250 * 1000) static int msm_otg_link_reset(struct msm_otg *motg) { int cnt = 0; writel_relaxed(USBCMD_RESET, USB_USBCMD); while (cnt < LINK_RESET_TIMEOUT_USEC) { if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET)) break; udelay(1); cnt++; } if (cnt >= LINK_RESET_TIMEOUT_USEC) return -ETIMEDOUT; /* select ULPI phy */ writel_relaxed(0x80000000, USB_PORTSC); writel_relaxed(0x0, USB_AHBBURST); writel_relaxed(0x08, USB_AHBMODE); return 0; } static int msm_otg_reset(struct usb_phy *phy) { struct msm_otg *motg = container_of(phy, struct msm_otg, phy); struct msm_otg_platform_data *pdata = motg->pdata; int ret; u32 val = 0; u32 ulpi_val = 0; USBH_INFO("%s\n", __func__); /* * USB PHY and Link reset also reset the USB BAM. * Thus perform reset operation only once to avoid * USB BAM reset on other cases e.g. USB cable disconnections. */ if (pdata->disable_reset_on_disconnect) { if (motg->reset_counter) return 0; else motg->reset_counter++; } if (!IS_ERR(motg->clk)) clk_prepare_enable(motg->clk); ret = msm_otg_phy_reset(motg); if (ret) { USBH_ERR("phy_reset failed\n"); return ret; } /* suppress id signal from phy */ if (readl(USB_OTGSC) & OTGSC_IDPU) writel(readl(USB_OTGSC) & ~OTGSC_IDPU, USB_OTGSC); aca_id_turned_on = false; ret = msm_otg_link_reset(motg); if (ret) { dev_err(phy->dev, "link reset failed\n"); return ret; } msleep(100); ulpi_init(motg); /* Ensure that RESET operation is completed before turning off clock */ mb(); if (!IS_ERR(motg->clk)) clk_disable_unprepare(motg->clk); if (pdata->otg_control == OTG_PHY_CONTROL) { val = readl_relaxed(USB_OTGSC); if (pdata->mode == USB_OTG) { ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID; val |= OTGSC_IDIE | OTGSC_BSVIE; } else if (pdata->mode == USB_PERIPHERAL) { ulpi_val = ULPI_INT_SESS_VALID; val |= OTGSC_BSVIE; } writel_relaxed(val, USB_OTGSC); ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE); ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL); } else if (pdata->otg_control == OTG_PMIC_CONTROL) { 
ulpi_write(phy, OTG_COMP_DISABLE, ULPI_SET(ULPI_PWR_CLK_MNG_REG)); /* Enable PMIC pull-up */ pm8xxx_usb_id_pullup(1); } return 0; } static const char *timer_string(int bit) { switch (bit) { case A_WAIT_VRISE: return "a_wait_vrise"; case A_WAIT_VFALL: return "a_wait_vfall"; case B_SRP_FAIL: return "b_srp_fail"; case A_WAIT_BCON: return "a_wait_bcon"; case A_AIDL_BDIS: return "a_aidl_bdis"; case A_BIDL_ADIS: return "a_bidl_adis"; case B_ASE0_BRST: return "b_ase0_brst"; case A_TST_MAINT: return "a_tst_maint"; case B_TST_SRP: return "b_tst_srp"; case B_TST_CONFIG: return "b_tst_config"; default: return "UNDEFINED"; } } static enum hrtimer_restart msm_otg_timer_func(struct hrtimer *hrtimer) { struct msm_otg *motg = container_of(hrtimer, struct msm_otg, timer); switch (motg->active_tmout) { case A_WAIT_VRISE: /* TODO: use vbus_vld interrupt */ set_bit(A_VBUS_VLD, &motg->inputs); break; case A_TST_MAINT: /* OTG PET: End session after TA_TST_MAINT */ set_bit(A_BUS_DROP, &motg->inputs); break; case B_TST_SRP: /* * OTG PET: Initiate SRP after TB_TST_SRP of * previous session end. */ set_bit(B_BUS_REQ, &motg->inputs); break; case B_TST_CONFIG: clear_bit(A_CONN, &motg->inputs); break; default: set_bit(motg->active_tmout, &motg->tmouts); } pr_debug("expired %s timer\n", timer_string(motg->active_tmout)); queue_work(system_nrt_wq, &motg->sm_work); return HRTIMER_NORESTART; } static void msm_otg_del_timer(struct msm_otg *motg) { int bit = motg->active_tmout; pr_debug("deleting %s timer. 
remaining %lld msec\n", timer_string(bit), div_s64(ktime_to_us(hrtimer_get_remaining( &motg->timer)), 1000)); hrtimer_cancel(&motg->timer); clear_bit(bit, &motg->tmouts); } static void msm_otg_start_timer(struct msm_otg *motg, int time, int bit) { clear_bit(bit, &motg->tmouts); motg->active_tmout = bit; pr_debug("starting %s timer\n", timer_string(bit)); hrtimer_start(&motg->timer, ktime_set(time / 1000, (time % 1000) * 1000000), HRTIMER_MODE_REL); } static void msm_otg_init_timer(struct msm_otg *motg) { hrtimer_init(&motg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); motg->timer.function = msm_otg_timer_func; } static int msm_otg_start_hnp(struct usb_otg *otg) { struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy); if (otg->phy->state != OTG_STATE_A_HOST) { pr_err("HNP can not be initiated in %s state\n", otg_state_string(otg->phy->state)); return -EINVAL; } pr_debug("A-Host: HNP initiated\n"); clear_bit(A_BUS_REQ, &motg->inputs); queue_work(system_nrt_wq, &motg->sm_work); return 0; } static int msm_otg_start_srp(struct usb_otg *otg) { struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy); u32 val; int ret = 0; if (otg->phy->state != OTG_STATE_B_IDLE) { pr_err("SRP can not be initiated in %s state\n", otg_state_string(otg->phy->state)); ret = -EINVAL; goto out; } if ((jiffies - motg->b_last_se0_sess) < msecs_to_jiffies(TB_SRP_INIT)) { pr_debug("initial conditions of SRP are not met. Try again" "after some time\n"); ret = -EAGAIN; goto out; } pr_debug("B-Device SRP started\n"); /* * PHY won't pull D+ high unless it detects Vbus valid. * Since by definition, SRP is only done when Vbus is not valid, * software work-around needs to be used to spoof the PHY into * thinking it is valid. This can be done using the VBUSVLDEXTSEL and * VBUSVLDEXT register bits. 
	 */
	ulpi_write(otg->phy, 0x03, 0x97);
	/*
	 * Harware auto assist data pulsing: Data pulse is given
	 * for 7msec; wait for vbus
	 */
	val = readl_relaxed(USB_OTGSC);
	writel_relaxed((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP, USB_OTGSC);

	/* VBUS plusing is obsoleted in OTG 2.0 supplement */
out:
	return ret;
}

/*
 * Prepare (or undo) the root hub for HNP. On enable: suspend the bus and
 * mark the HCD inaccessible without tearing it down. On disable: remove
 * the HCD, reset the PHY and re-add the HCD from scratch.
 */
static void msm_otg_host_hnp_enable(struct usb_otg *otg, bool enable)
{
	struct usb_hcd *hcd = bus_to_hcd(otg->host);
	struct usb_device *rhub = otg->host->root_hub;

	if (enable) {
		pm_runtime_disable(&rhub->dev);
		rhub->state = USB_STATE_NOTATTACHED;
		hcd->driver->bus_suspend(hcd);
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	} else {
		usb_remove_hcd(hcd);
		msm_otg_reset(otg->phy);
		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
	}
}

/*
 * usb_phy .set_suspend hook: translate a bus suspend/resume request into
 * state-machine inputs and kick sm_work. No-op when ACA is in use or when
 * the requested LPM state already matches motg->in_lpm.
 */
static int msm_otg_set_suspend(struct usb_phy *phy, int suspend)
{
	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);

	if (aca_enabled() || (test_bit(ID, &motg->inputs) &&
			!test_bit(ID_A, &motg->inputs)))
		return 0;

	if (atomic_read(&motg->in_lpm) == suspend)
		return 0;

	if (suspend) {
		switch (phy->state) {
		case OTG_STATE_A_WAIT_BCON:
			if (TA_WAIT_BCON > 0)
				break;
			/* fall through */
		case OTG_STATE_A_HOST:
			pr_debug("host bus suspend\n");
			clear_bit(A_BUS_REQ, &motg->inputs);
			queue_work(system_nrt_wq, &motg->sm_work);
			break;
		case OTG_STATE_B_PERIPHERAL:
			pr_debug("peripheral bus suspend\n");
			if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
				break;
			set_bit(A_BUS_SUSPEND, &motg->inputs);
			queue_work(system_nrt_wq, &motg->sm_work);
			break;
		default:
			break;
		}
	} else {
		switch (phy->state) {
		case OTG_STATE_A_SUSPEND:
			/* Remote wakeup or resume */
			set_bit(A_BUS_REQ, &motg->inputs);
			phy->state = OTG_STATE_A_HOST;
			/* ensure hardware is not in low power mode */
			pm_runtime_resume(phy->dev);
			break;
		case OTG_STATE_B_PERIPHERAL:
			pr_debug("peripheral bus resume\n");
			if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND))
				break;
			clear_bit(A_BUS_SUSPEND, &motg->inputs);
			queue_work(system_nrt_wq, &motg->sm_work);
			break;
		default:
			break;
		}
	}
	return 0;
}

/* Max time (usec) to poll for the PHY to enter/leave low power mode */
#define PHY_SUSPEND_TIMEOUT_USEC	(500 * 1000)
#define PHY_RESUME_TIMEOUT_USEC	(100 * 1000)

#ifdef CONFIG_PM_SLEEP
/*
 * Put controller and PHY into low power mode (LPM).
 * Returns 0 on success, -EBUSY if a session prevents LPM entry,
 * -ETIMEDOUT if the PHY refuses to enter LPM.
 */
static int msm_otg_suspend(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	struct usb_bus *bus = phy->otg->host;
	struct usb_otg *otg = motg->phy.otg;
	struct msm_otg_platform_data *pdata = motg->pdata;
	int cnt = 0;
	bool host_bus_suspend, device_bus_suspend, dcp, prop_charger;
	u32 phy_ctrl_val = 0, cmd_val;
	unsigned ret;
	u32 portsc;

	if (atomic_read(&motg->in_lpm))
		return 0;

	USBH_INFO("%s\n", __func__);
	disable_irq(motg->irq);
	host_bus_suspend = phy->otg->host && !test_bit(ID, &motg->inputs);
	device_bus_suspend = phy->otg->gadget && test_bit(ID, &motg->inputs) &&
		test_bit(A_BUS_SUSPEND, &motg->inputs) &&
		motg->caps & ALLOW_LPM_ON_DEV_SUSPEND;
	dcp = motg->chg_type == USB_DCP_CHARGER;
	prop_charger = motg->chg_type == USB_PROPRIETARY_CHARGER;
	/* An active SDP/CDP session must stay awake */
	if (test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend &&
		!dcp && !prop_charger) {
		enable_irq(motg->irq);
		return -EBUSY;
	}

	/*
	 * Chipidea 45-nm PHY suspend sequence:
	 *
	 * Interrupt Latch Register auto-clear feature is not present
	 * in all PHY versions. Latch register is clear on read type.
	 * Clear latch register to avoid spurious wakeup from
	 * low power mode (LPM).
	 *
	 * PHY comparators are disabled when PHY enters into low power
	 * mode (LPM). Keep PHY comparators ON in LPM only when we expect
	 * VBUS/Id notifications from USB PHY. Otherwise turn off USB
	 * PHY comparators. This save significant amount of power.
	 *
	 * PLL is not turned off when PHY enters into low power mode (LPM).
	 * Disable PLL for maximum power savings.
	 */
	if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) {
		ulpi_read(phy, 0x14);
		if (pdata->otg_control == OTG_PHY_CONTROL)
			ulpi_write(phy, 0x01, 0x30);
		ulpi_write(phy, 0x08, 0x09);
	}

	/* Set the PHCD bit, only if it is not set by the controller.
	 * PHY may take some time or even fail to enter into low power
	 * mode (LPM). Hence poll for 500 msec and reset the PHY and link
	 * in failure case.
	 */
	portsc = readl_relaxed(USB_PORTSC);
	if (!(portsc & PORTSC_PHCD)) {
		writel_relaxed(portsc | PORTSC_PHCD, USB_PORTSC);
		/* poll up to PHY_SUSPEND_TIMEOUT_USEC for PHCD to latch */
		while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
			if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
				break;
			udelay(1);
			cnt++;
		}
	}

	if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
		USBH_WARNING("Unable to suspend PHY\n");
		msm_otg_reset(phy);
		enable_irq(motg->irq);
		return -ETIMEDOUT;
	}

	/*
	 * PHY has capability to generate interrupt asynchronously in low
	 * power mode (LPM). This interrupt is level triggered. So USB IRQ
	 * line must be disabled till async interrupt enable bit is cleared
	 * in USBCMD register. Assert STP (ULPI interface STOP signal) to
	 * block data communication from PHY.
	 *
	 * PHY retention mode is disallowed while entering to LPM with wall
	 * charger connected. But PHY is put into suspend mode. Hence
	 * enable asynchronous interrupt to detect charger disconnection when
	 * PMIC notifications are unavailable.
	 */
	if (otg->phy->state == OTG_STATE_A_WAIT_BCON) {
		USBH_INFO("%s:enable the ASYNC_INTR\n", __func__);
		cmd_val = readl_relaxed(USB_USBCMD);
		if (host_bus_suspend || device_bus_suspend ||
				(motg->pdata->otg_control == OTG_PHY_CONTROL))
			cmd_val |= ASYNC_INTR_CTRL | ULPI_STP_CTRL;
		else
			cmd_val |= ULPI_STP_CTRL;
		writel_relaxed(cmd_val, USB_USBCMD);
	} else {
		/* Remove ASYNC_INTR_CTRL to avoid random wakeup */
		/* for normal case*/
		writel(readl(USB_USBCMD) | ULPI_STP_CTRL, USB_USBCMD);
	}

	/*
	 * BC1.2 spec mandates PD to enable VDP_SRC when charging from DCP.
	 * PHY retention and collapse can not happen with VDP_SRC enabled.
	 */
	if (motg->caps & ALLOW_PHY_RETENTION && !host_bus_suspend &&
		!device_bus_suspend && !dcp) {
		phy_ctrl_val = readl_relaxed(USB_PHY_CTRL);
		if (motg->pdata->otg_control == OTG_PHY_CONTROL)
			/* Enable PHY HV interrupts to wake MPM/Link */
			phy_ctrl_val |=
				(PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);

		/* for 8064 MPM USB PHY ID pin issue.
		   Disable level shift*/
		phy_ctrl_val &= ~(1<<10);
		writel_relaxed(phy_ctrl_val & ~PHY_RETEN, USB_PHY_CTRL);
		motg->lpm_flags |= PHY_RETENTIONED;
	}

	/* Ensure that above operation is completed before turning off clocks */
	mb();

	if (!motg->pdata->core_clk_always_on_workaround) {
		clk_disable_unprepare(motg->pclk);
		clk_disable_unprepare(motg->core_clk);
	}

	/* usb phy no more require TCXO clock, hence vote for TCXO disable */
	if (!host_bus_suspend) {
		ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF);
		if (ret)
			dev_err(phy->dev, "%s failed to devote for "
				"TCXO D0 buffer%d\n", __func__, ret);
		else
			motg->lpm_flags |= XO_SHUTDOWN;
	}

	/* power-collapse the PHY entirely where allowed */
	if (motg->caps & ALLOW_PHY_POWER_COLLAPSE &&
			!host_bus_suspend && !dcp) {
		msm_hsusb_ldo_enable(motg, 0);
		motg->lpm_flags |= PHY_PWR_COLLAPSED;
	}

	if ((motg->lpm_flags & PHY_RETENTIONED) ||
		(motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY &&
			!host_bus_suspend && !device_bus_suspend && !dcp)) {
		msm_hsusb_config_vddcx(0);
		msm_hsusb_mhl_switch_enable(motg, 0);
	}

	if (device_may_wakeup(phy->dev)) {
		enable_irq_wake(motg->irq);
		if (motg->pdata->pmic_id_irq)
			enable_irq_wake(motg->pdata->pmic_id_irq);
	}
	if (bus)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);

	atomic_set(&motg->in_lpm, 1);
	enable_irq(motg->irq);

	USBH_INFO("USB in low power mode\n");

	return 0;
}

/*
 * Exported helper to gate the USB core clock on/off, honouring the
 * core_clk_always_on workaround. Always returns 0.
 */
int msm_otg_setclk(int on)
{
	if (on) {
		if (!the_msm_otg->pdata->core_clk_always_on_workaround) {
			clk_prepare_enable(the_msm_otg->core_clk);
		}
	} else {
		if (!the_msm_otg->pdata->core_clk_always_on_workaround) {
			clk_disable_unprepare(the_msm_otg->core_clk);
		}
	}
	return 0;
}
EXPORT_SYMBOL(msm_otg_setclk);

/*
 * Bring the controller and PHY out of low power mode: restore TCXO,
 * clocks, LDOs, retention settings, then clear PHCD. Returns 0.
 */
static int msm_otg_resume(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	struct usb_bus *bus = phy->otg->host;
	struct usb_otg *otg = motg->phy.otg;
	int cnt = 0;
	unsigned temp;
	u32 phy_ctrl_val = 0;
	unsigned ret;

	if (!atomic_read(&motg->in_lpm))
		return 0;

	USBH_INFO("%s\n", __func__);
	/* Vote for TCXO when waking up the phy */
	if (motg->lpm_flags & XO_SHUTDOWN) {
		ret =
		    msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON);
		if (ret)
			dev_err(phy->dev, "%s failed to vote for "
				"TCXO D0 buffer%d\n", __func__, ret);
		motg->lpm_flags &= ~XO_SHUTDOWN;
	}

	if (!motg->pdata->core_clk_always_on_workaround) {
		clk_prepare_enable(motg->core_clk);
		clk_prepare_enable(motg->pclk);
	}

	if (motg->lpm_flags & PHY_PWR_COLLAPSED) {
		msm_hsusb_ldo_enable(motg, 1);
		motg->lpm_flags &= ~PHY_PWR_COLLAPSED;
		USBH_DEBUG("exit phy power collapse...\n");
	}

	if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) {
		msm_hsusb_mhl_switch_enable(motg, 1);
		msm_hsusb_config_vddcx(1);
	}

	/* undo the retention settings applied in msm_otg_suspend() */
	if (motg->lpm_flags & PHY_RETENTIONED) {
		msm_hsusb_mhl_switch_enable(motg, 1);
		msm_hsusb_config_vddcx(1);
		phy_ctrl_val = readl_relaxed(USB_PHY_CTRL);
		phy_ctrl_val |= PHY_RETEN;
		if (motg->pdata->otg_control == OTG_PHY_CONTROL)
			/* Disable PHY HV interrupts */
			phy_ctrl_val &=
				~(PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN);
		/* for 8064 MPM USB PHY ID pin issue. Disable level shift*/
		phy_ctrl_val |= (1<<10);
		writel_relaxed(phy_ctrl_val, USB_PHY_CTRL);
		motg->lpm_flags &= ~PHY_RETENTIONED;
	}

	temp = readl(USB_USBCMD);
	if (otg->phy->state != OTG_STATE_A_WAIT_BCON) {
		USBH_INFO("%s:disable the ASYNC_INTR\n", __func__);
		temp &= ~ASYNC_INTR_CTRL;
	}
	temp &= ~ULPI_STP_CTRL;
	writel(temp, USB_USBCMD);

	/*
	 * PHY comes out of low power mode (LPM) in case of wakeup
	 * from asynchronous interrupt.
	 */
	if (!(readl(USB_PORTSC) & PORTSC_PHCD))
		goto skip_phy_resume;

	writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
	while (cnt < PHY_RESUME_TIMEOUT_USEC) {
		if (!(readl(USB_PORTSC) & PORTSC_PHCD))
			break;
		udelay(1);
		cnt++;
	}

	if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
		/*
		 * This is a fatal error. Reset the link and
		 * PHY. USB state can not be restored. Re-insertion
		 * of USB cable is the only way to get USB working.
		 */
		USBH_ERR("Unable to resume USB."
				"Re-plugin the cable\n");
		msm_otg_reset(phy);
	}

skip_phy_resume:
	if (device_may_wakeup(phy->dev)) {
		disable_irq_wake(motg->irq);
		if (motg->pdata->pmic_id_irq)
			disable_irq_wake(motg->pdata->pmic_id_irq);
	}
	if (bus)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);

	atomic_set(&motg->in_lpm, 0);

	/* re-enable the IRQ that the async-wakeup path left disabled */
	if (motg->async_int) {
		motg->async_int = 0;
		enable_irq(motg->irq);
	}

	USBH_INFO("USB exited from low power mode\n");

	return 0;
}
#endif

/*
 * Report the detected charger type to the PMIC as a power-supply type.
 * Caches the last reported value (static) to avoid duplicate calls.
 * Returns 0 if unchanged, otherwise pm8921_set_usb_power_supply_type()'s
 * result.
 */
static int msm_otg_notify_chg_type(struct msm_otg *motg)
{
	/* last power-supply type reported to the PMIC; 0 = none yet */
	static int charger_type;
	/*
	 * TODO
	 * Unify OTG driver charger types and power supply charger types
	 */
	if (charger_type == motg->chg_type)
		return 0;

	if (motg->chg_type == USB_SDP_CHARGER)
		charger_type = POWER_SUPPLY_TYPE_USB;
	else if (motg->chg_type == USB_CDP_CHARGER)
		charger_type = POWER_SUPPLY_TYPE_USB_CDP;
	else if (motg->chg_type == USB_DCP_CHARGER ||
			motg->chg_type == USB_PROPRIETARY_CHARGER)
		charger_type = POWER_SUPPLY_TYPE_USB_DCP;
	else if ((motg->chg_type == USB_ACA_DOCK_CHARGER ||
		motg->chg_type == USB_ACA_A_CHARGER ||
		motg->chg_type == USB_ACA_B_CHARGER ||
		motg->chg_type == USB_ACA_C_CHARGER))
		charger_type = POWER_SUPPLY_TYPE_USB_ACA;
	else
		charger_type = POWER_SUPPLY_TYPE_BATTERY;

	return pm8921_set_usb_power_supply_type(charger_type);
}

/*
 * Push the allowed input current (mA) to the power-supply framework.
 * Switches the supply online/offline on 0<->nonzero transitions.
 * Returns 0 on success, -ENXIO when no capable power supply exists.
 */
static int msm_otg_notify_power_supply(struct msm_otg *motg, unsigned mA)
{
	if (!psy)
		goto psy_not_supported;

	if (motg->cur_power == 0 && mA > 0) {
		/* Enable charging */
		if (power_supply_set_online(psy, true))
			goto psy_not_supported;
	} else if (motg->cur_power > 0 && mA == 0) {
		/* Disable charging */
		if (power_supply_set_online(psy, false))
			goto psy_not_supported;
		return 0;
	}
	/* Set max current limit (framework expects uA, hence 1000*mA) */
	if (power_supply_set_current_limit(psy, 1000*mA))
		goto psy_not_supported;

	return 0;

psy_not_supported:
	dev_dbg(motg->phy.dev, "Power Supply doesn't support USB charger\n");
	return -ENXIO;
}

/*
 * Central charger-current notification: clamps ACA current, informs the
 * PMIC of the charger type and propagates the current limit, falling back
 * to the legacy pm8921 API when the power-supply path is unavailable.
 */
static void msm_otg_notify_charger(struct msm_otg *motg, unsigned mA)
{
	struct usb_gadget *g = motg->phy.otg->gadget;

	if (g && g->is_a_peripheral)
		return;

	/* ACA chargers are limited to IDEV_ACA_CHG_LIMIT */
	if ((motg->chg_type == USB_ACA_DOCK_CHARGER ||
		motg->chg_type == USB_ACA_A_CHARGER ||
		motg->chg_type == USB_ACA_B_CHARGER ||
		motg->chg_type == USB_ACA_C_CHARGER) &&
			mA > IDEV_ACA_CHG_LIMIT)
		mA = IDEV_ACA_CHG_LIMIT;

	if (msm_otg_notify_chg_type(motg))
		dev_err(motg->phy.dev,
			"Failed notifying %d charger type to PMIC\n",
							motg->chg_type);

	if (motg->cur_power == mA)
		return;

	dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);

	/*
	 * Use Power Supply API if supported, otherwise fallback
	 * to legacy pm8921 API.
	 */
	if (msm_otg_notify_power_supply(motg, mA))
		pm8921_charger_vbus_draw(mA);

	motg->cur_power = mA;
}

/*
 * Report a confirmed USB (SDP) attach to the connection notifier and
 * cancel any pending AC-detection retries.
 */
static void msm_otg_notify_usb_attached(void)
{
	struct msm_otg *motg = the_msm_otg;

	if (motg->connect_type != CONNECT_TYPE_USB) {
		motg->connect_type = CONNECT_TYPE_USB;
		queue_work(motg->usb_wq, &motg->notifier_work);
	}

	motg->ac_detect_count = 0;
	__cancel_delayed_work(&motg->ac_detect_work);
}

/*
 * usb_phy .set_power hook, called by the gadget driver with the current
 * budget negotiated on the bus. Only forwarded for SDP chargers.
 */
static int msm_otg_set_power(struct usb_phy *phy, unsigned mA)
{
	struct msm_otg *motg = container_of(phy, struct msm_otg, phy);

	/*
	 * Gadget driver uses set_power method to notify about the
	 * available current based on suspend/configured states.
	 *
	 * IDEV_CHG can be drawn irrespective of suspend/un-configured
	 * states when CDP/ACA is connected.
	 */
	if (motg->chg_type == USB_SDP_CHARGER)
		msm_otg_notify_charger(motg, mA);

	return 0;
}

/*
 * Start (on != 0) or stop the host controller for the registered host
 * bus. Honours the global USB_disabled override, in which case the
 * request may be deferred via re_enable_host/stop_usb_host flags.
 */
static void msm_otg_start_host(struct usb_otg *otg, int on)
{
	struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy);
	struct msm_otg_platform_data *pdata = motg->pdata;
	struct usb_hcd *hcd;

	if (!otg->host)
		return;

	if (USB_disabled) {
		if (stop_usb_host == TRY_STOP_HOST_STATE) {
			USBH_INFO("[USB_disabled] %s(%d) to stop host \n", __func__ , on);
			stop_usb_host = STOP_HOST_STATE;
		} else {
			USBH_INFO("[USB_disabled] %s(%d) return\n", __func__ , on);
			if (on) {
				/* remember to start the host once re-enabled */
				re_enable_host = TRY_ENABLE_HOST_STATE;
				stop_usb_host = STOP_HOST_STATE;
			}
			return;
		}
	}

	hcd = bus_to_hcd(otg->host);

	if (on) {
		USBH_DEBUG("host on\n");

		if (pdata->otg_control == OTG_PHY_CONTROL)
			ulpi_write(otg->phy, OTG_COMP_DISABLE,
				ULPI_SET(ULPI_PWR_CLK_MNG_REG));

		/*
		 * Some boards have a switch cotrolled by gpio
		 * to enable/disable internal HUB. Enable internal
		 * HUB before kicking the host.
		 */
		if (pdata->setup_gpio)
			pdata->setup_gpio(OTG_STATE_A_HOST);
		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
	} else {
		USBH_DEBUG("host off\n");

		usb_remove_hcd(hcd);
		/* HCD core reset all bits of PORTSC. select ULPI phy */
		writel_relaxed(0x80000000, USB_PORTSC);

		if (pdata->setup_gpio)
			pdata->setup_gpio(OTG_STATE_UNDEFINED);

		if (pdata->otg_control == OTG_PHY_CONTROL)
			ulpi_write(otg->phy, OTG_COMP_DISABLE,
				ULPI_CLR(ULPI_PWR_CLK_MNG_REG));
	}
}

/*
 * USB device notifier: tracks add/config/remove of devices directly on
 * our root hub to drive B_CONN, PET test timers and ACA charging limits.
 */
static int msm_otg_usbdev_notify(struct notifier_block *self,
			unsigned long action, void *priv)
{
	struct msm_otg *motg = container_of(self, struct msm_otg, usbdev_nb);
	struct usb_otg *otg = motg->phy.otg;
	struct usb_device *udev = priv;

	/* bus-level events carry a usb_bus, not a usb_device — ignore */
	if (action == USB_BUS_ADD || action == USB_BUS_REMOVE)
		goto out;

	if (udev->bus != otg->host)
		goto out;
	/*
	 * Interested in devices connected directly to the root hub.
	 * ACA dock can supply IDEV_CHG irrespective devices connected
	 * on the accessory port.
	 */
	if (!udev->parent || udev->parent->parent ||
			motg->chg_type == USB_ACA_DOCK_CHARGER)
		goto out;

	switch (action) {
	case USB_DEVICE_ADD:
		if (aca_enabled())
			usb_disable_autosuspend(udev);
		if (otg->phy->state == OTG_STATE_A_WAIT_BCON) {
			pr_debug("B_CONN set\n");
			set_bit(B_CONN, &motg->inputs);
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_A_HOST;
			/*
			 * OTG PET: A-device must end session within
			 * 10 sec after PET enumeration.
			 */
			if (udev->quirks & USB_QUIRK_OTG_PET)
				msm_otg_start_timer(motg, TA_TST_MAINT,
						A_TST_MAINT);
		}
		/* fall through */
	case USB_DEVICE_CONFIG:
		/* bMaxPower is in 2 mA units; default to IUNIT if unset */
		if (udev->actconfig)
			motg->mA_port = udev->actconfig->desc.bMaxPower * 2;
		else
			motg->mA_port = IUNIT;
		if (otg->phy->state == OTG_STATE_B_HOST)
			msm_otg_del_timer(motg);
		break;
	case USB_DEVICE_REMOVE:
		if ((otg->phy->state == OTG_STATE_A_HOST) ||
			(otg->phy->state == OTG_STATE_A_SUSPEND)) {
			pr_debug("B_CONN clear\n");
			clear_bit(B_CONN, &motg->inputs);
			/*
			 * OTG PET: A-device must end session after
			 * PET disconnection if it is enumerated
			 * with bcdDevice[0] = 1. USB core sets
			 * bus->otg_vbus_off for us. clear it here.
			 */
			if (udev->bus->otg_vbus_off) {
				udev->bus->otg_vbus_off = 0;
				set_bit(A_BUS_DROP, &motg->inputs);
			}
			queue_work(system_nrt_wq, &motg->sm_work);
		}
		/* fall through to default (no further action) */
	default:
		break;
	}
	if (test_bit(ID_A, &motg->inputs))
		msm_otg_notify_charger(motg, IDEV_ACA_CHG_MAX -
				motg->mA_port);
out:
	return NOTIFY_OK;
}

/*
 * Switch VBUS on/off for host mode. The board callback (pdata->vbus_power)
 * does the actual switching when present; otherwise the battery driver is
 * informed via the connect-type notifier. Idempotent (tracks vbus_is_on).
 */
static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on)
{
	int ret;
	static bool vbus_is_on;

	if (vbus_is_on == on)
		return;

	if (motg->pdata->vbus_power) {
		ret = motg->pdata->vbus_power(on);
		if (!ret)
			vbus_is_on = on;
	} else {
		/* send connect type to battery to enable boost 5v */
		vbus_is_on = on;
	}

	if (on) {
		motg->connect_type = CONNECT_TYPE_INTERNAL;
		queue_work(motg->usb_wq, &motg->notifier_work);
	} else {
		motg->connect_type = CONNECT_TYPE_CLEAR;
		queue_work(motg->usb_wq, &motg->notifier_work);
	}
	return;
	/* regulator-based VBUS path kept for reference but compiled out */
#if 0
	if (!vbus_otg) {
		pr_err("vbus_otg is NULL.");
		return;
	}
	/*
	 * if entering host mode tell the charger to not draw any current
	 * from usb before turning on the boost.
	 * if exiting host mode disable the boost before enabling to draw
	 * current from the source.
	 */
	if (on) {
		msm_otg_notify_host_mode(motg, on);
		ret = regulator_enable(vbus_otg);
		if (ret) {
			pr_err("unable to enable vbus_otg\n");
			return;
		}
		vbus_is_on = true;
	} else {
		ret = regulator_disable(vbus_otg);
		if (ret) {
			pr_err("unable to disable vbus_otg\n");
			return;
		}
		msm_otg_notify_host_mode(motg, on);
		vbus_is_on = false;
	}
#endif
}

/*
 * usb_otg .set_host hook: register (or, with host == NULL, unregister)
 * the host bus with the transceiver and kick the state machine when a
 * session can start right away.
 */
static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
{
	struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy);
	struct usb_hcd *hcd;

	/*
	 * Fail host registration if this board can support
	 * only peripheral configuration.
	 */
	if (motg->pdata->mode == USB_PERIPHERAL) {
		USBH_INFO("Host mode is not supported\n");
		return -ENODEV;
	}

	if (!host) {
		USB_WARNING("%s: no host\n", __func__);
		/* host driver is going away: tear the session down */
		if (otg->phy->state == OTG_STATE_A_HOST) {
			pm_runtime_get_sync(otg->phy->dev);
			usb_unregister_notify(&motg->usbdev_nb);
			msm_otg_start_host(otg, 0);
			msm_hsusb_vbus_power(motg, 0);
			otg->host = NULL;
			otg->phy->state = OTG_STATE_UNDEFINED;
			queue_work(system_nrt_wq, &motg->sm_work);
		} else {
			otg->host = NULL;
		}
		return 0;
	}

	hcd = bus_to_hcd(host);
	hcd->power_budget = motg->pdata->power_budget;

#ifdef CONFIG_USB_OTG
	host->otg_port = 1;
#endif
	motg->usbdev_nb.notifier_call = msm_otg_usbdev_notify;
	usb_register_notify(&motg->usbdev_nb);
	otg->host = host;
	USBH_DEBUG("host driver registered w/ tranceiver\n");

	/*
	 * Kick the state machine work, if peripheral is not supported
	 * or peripheral is already registered with us.
	 */
	if (motg->pdata->mode == USB_HOST || otg->gadget) {
		USB_WARNING("host only, otg->gadget exist\n");
		pm_runtime_get_sync(otg->phy->dev);
		queue_work(system_nrt_wq, &motg->sm_work);
	}

	return 0;
}

/*
 * Start (on != 0) or stop the peripheral (gadget) side: holds/releases
 * a wakelock, votes bus bandwidth and toggles the gadget's VBUS state.
 */
static void msm_otg_start_peripheral(struct usb_otg *otg, int on)
{
	int ret;
	struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy);
	struct msm_otg_platform_data *pdata = motg->pdata;

	if (!otg->gadget)
		return;

	if (on) {
		USBH_DEBUG("gadget on\n");
		/* FIXME: hold a wake_lock here... */
		wake_lock(&motg->wlock);
		/*
		 * Some boards have a switch cotrolled by gpio
		 * to enable/disable internal HUB. Disable internal
		 * HUB before kicking the gadget.
		 */
		if (pdata->setup_gpio)
			pdata->setup_gpio(OTG_STATE_B_PERIPHERAL);

		/* Configure BUS performance parameters for MAX bandwidth */
		if (motg->bus_perf_client && debug_bus_voting_enabled) {
			ret = msm_bus_scale_client_update_request(
					motg->bus_perf_client, 1);
			if (ret)
				dev_err(motg->phy.dev, "%s: Failed to vote for "
					"bus bandwidth %d\n", __func__, ret);
		}
		usb_gadget_vbus_connect(otg->gadget);
	} else {
		USBH_DEBUG("gadget off\n");
		usb_gadget_vbus_disconnect(otg->gadget);
		/* Configure BUS performance parameters to default */
		if (motg->bus_perf_client) {
			ret = msm_bus_scale_client_update_request(
					motg->bus_perf_client, 0);
			if (ret)
				dev_err(motg->phy.dev, "%s: Failed to devote "
					"for bus bw %d\n", __func__, ret);
		}
		if (pdata->setup_gpio)
			pdata->setup_gpio(OTG_STATE_UNDEFINED);
		/* FIXME: release a wake lock here... */
		wake_unlock(&motg->wlock);
	}
}

/*
 * Deferred handler for a "USB disabled" request: fake a DCP charger so
 * charging continues, stop the gadget and restart the state machine.
 */
static void usb_disable_work(struct work_struct *w)
{
	struct msm_otg *motg = the_msm_otg;
	struct usb_phy *usb_phy = &motg->phy;

	USBH_INFO("%s\n", __func__);
	motg->chg_type = USB_DCP_CHARGER;
	motg->chg_state = USB_CHG_STATE_DETECTED;
	msm_otg_start_peripheral(usb_phy->otg, 0);
	usb_phy->state = OTG_STATE_B_IDLE;
	queue_work(system_nrt_wq, &motg->sm_work);
}

/* Disconnect the gadget immediately and schedule usb_disable_work. */
static void msm_otg_notify_usb_disabled(void)
{
	struct msm_otg *motg = the_msm_otg;
	struct usb_phy *usb_phy = &motg->phy;

	USBH_INFO("%s\n", __func__);
	usb_gadget_vbus_disconnect(usb_phy->otg->gadget);
	queue_work(system_nrt_wq, &motg->usb_disable_work);
}

/*
 * usb_otg .set_peripheral hook: register (or, with gadget == NULL,
 * unregister) the gadget driver with the transceiver.
 */
static int msm_otg_set_peripheral(struct usb_otg *otg,
			struct usb_gadget *gadget)
{
	struct msm_otg *motg = container_of(otg->phy, struct msm_otg, phy);

	/*
	 * Fail peripheral registration if this board can support
	 * only host configuration.
	 */
	if (motg->pdata->mode == USB_HOST) {
		USBH_ERR("Peripheral mode is not supported\n");
		return -ENODEV;
	}

	if (!gadget) {
		USB_WARNING("%s: no gadget\n", __func__);
		/* gadget driver is going away: end any active session */
		if (otg->phy->state == OTG_STATE_B_PERIPHERAL) {
			pm_runtime_get_sync(otg->phy->dev);
			msm_otg_start_peripheral(otg, 0);
			otg->gadget = NULL;
			otg->phy->state = OTG_STATE_UNDEFINED;
			queue_work(system_nrt_wq, &motg->sm_work);
		} else {
			otg->gadget = NULL;
		}
		return 0;
	}
	otg->gadget = gadget;
	USBH_DEBUG("peripheral driver registered w/ tranceiver\n");

	/*
	 * Kick the state machine work, if host is not supported
	 * or host is already registered with us.
	 */
	if (motg->pdata->mode == USB_PERIPHERAL || otg->host) {
		USB_WARNING("peripheral only, otg->host exist\n");
		pm_runtime_get_sync(otg->phy->dev);
		queue_work(system_nrt_wq, &motg->sm_work);
	}

	return 0;
}

/*
 * Decode the ACA RID state from PHY register 0x87 (bits [4:2]) and update
 * the ID_A/ID_B/ID_C/ID inputs and charger type accordingly.
 * Returns true when any input bit changed.
 */
static bool msm_chg_aca_detect(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	u32 int_sts;
	bool ret = false;

	if (!aca_enabled())
		goto out;

	/* ACA detection is not supported on the 45nm PHY */
	if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY)
		goto out;

	int_sts = ulpi_read(phy, 0x87);
	switch (int_sts & 0x1C) {
	case 0x08:	/* RID_A */
		if (!test_and_set_bit(ID_A, &motg->inputs)) {
			USBH_DEBUG("ID_A\n");
			motg->chg_type = USB_ACA_A_CHARGER;
			motg->chg_state = USB_CHG_STATE_DETECTED;
			clear_bit(ID_B, &motg->inputs);
			clear_bit(ID_C, &motg->inputs);
			set_bit(ID, &motg->inputs);
			ret = true;
		}
		break;
	case 0x0C:	/* RID_B */
		if (!test_and_set_bit(ID_B, &motg->inputs)) {
			USBH_DEBUG("ID_B\n");
			motg->chg_type = USB_ACA_B_CHARGER;
			motg->chg_state = USB_CHG_STATE_DETECTED;
			clear_bit(ID_A, &motg->inputs);
			clear_bit(ID_C, &motg->inputs);
			set_bit(ID, &motg->inputs);
			ret = true;
		}
		break;
	case 0x10:	/* RID_C */
		if (!test_and_set_bit(ID_C, &motg->inputs)) {
			USBH_DEBUG("ID_C\n");
			motg->chg_type = USB_ACA_C_CHARGER;
			motg->chg_state = USB_CHG_STATE_DETECTED;
			clear_bit(ID_A, &motg->inputs);
			clear_bit(ID_B, &motg->inputs);
			set_bit(ID, &motg->inputs);
			ret = true;
		}
		break;
	case 0x04:	/* RID_GND */
		if (test_and_clear_bit(ID, &motg->inputs)) {
			dev_dbg(phy->dev, "ID_GND\n");
			motg->chg_type =
					USB_INVALID_CHARGER;
			motg->chg_state = USB_CHG_STATE_UNDEFINED;
			clear_bit(ID_A, &motg->inputs);
			clear_bit(ID_B, &motg->inputs);
			clear_bit(ID_C, &motg->inputs);
			ret = true;
		}
		break;
	default:
		/* RID float/unknown: drop all ACA states, set plain ID.
		 * Bitwise | (not ||) on purpose: every call must run. */
		ret = test_and_clear_bit(ID_A, &motg->inputs) |
			test_and_clear_bit(ID_B, &motg->inputs) |
			test_and_clear_bit(ID_C, &motg->inputs) |
			!test_and_set_bit(ID, &motg->inputs);
		if (ret) {
			USBH_DEBUG("ID A/B/C/GND is no more\n");
			motg->chg_type = USB_INVALID_CHARGER;
			motg->chg_state = USB_CHG_STATE_UNDEFINED;
		}
	}
out:
	return ret;
}

/* Switch ID sensing from link/PMIC pull-up to the PHY's ACA detector. */
static void msm_chg_enable_aca_det(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;

	if (!aca_enabled())
		return;

	switch (motg->pdata->phy_type) {
	case SNPS_28NM_INTEGRATED_PHY:
		/* Disable ID_GND in link and PHY */
		writel_relaxed(readl_relaxed(USB_OTGSC) & ~(OTGSC_IDPU |
				OTGSC_IDIE), USB_OTGSC);
		ulpi_write(phy, 0x01, 0x0C);
		ulpi_write(phy, 0x10, 0x0F);
		ulpi_write(phy, 0x10, 0x12);
		/* Disable PMIC ID pull-up */
		pm8xxx_usb_id_pullup(0);
		/* Enable ACA ID detection */
		ulpi_write(phy, 0x20, 0x85);
		aca_id_turned_on = true;
		break;
	default:
		break;
	}
}

/* Unmask the ACA RID-change interrupt in the PHY. */
static void msm_chg_enable_aca_intr(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;

	if (!aca_enabled())
		return;

	switch (motg->pdata->phy_type) {
	case SNPS_28NM_INTEGRATED_PHY:
		/* Enable ACA Detection interrupt (on any RID change) */
		ulpi_write(phy, 0x01, 0x94);
		break;
	default:
		break;
	}
}

/* Mask the ACA RID-change interrupt in the PHY. */
static void msm_chg_disable_aca_intr(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;

	if (!aca_enabled())
		return;

	switch (motg->pdata->phy_type) {
	case SNPS_28NM_INTEGRATED_PHY:
		ulpi_write(phy, 0x01, 0x95);
		break;
	default:
		break;
	}
}

/*
 * Check and acknowledge a pending ACA RID-change interrupt; re-runs
 * ACA detection when one fired. Returns true if inputs changed.
 */
static bool msm_chg_check_aca_intr(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	bool ret = false;

	if (!aca_enabled())
		return ret;

	switch (motg->pdata->phy_type) {
	case SNPS_28NM_INTEGRATED_PHY:
		if (ulpi_read(phy, 0x91) & 1) {
			USBH_DEBUG("RID change\n");
			ulpi_write(phy, 0x01, 0x92);
			ret = msm_chg_aca_detect(motg);
		}
		/* NOTE(review): no break here — falls into default, which
		 * only breaks; harmless but looks unintentional. */
	default:
		break;
	}
	return ret;
}

static void
msm_otg_id_timer_func(unsigned long data) { struct msm_otg *motg = (struct msm_otg *) data; if (!aca_enabled()) return; if (atomic_read(&motg->in_lpm)) { dev_dbg(motg->phy.dev, "timer: in lpm\n"); return; } if (motg->phy.state == OTG_STATE_A_SUSPEND) goto out; if (msm_chg_check_aca_intr(motg)) { dev_dbg(motg->phy.dev, "timer: aca work\n"); queue_work(system_nrt_wq, &motg->sm_work); } out: if (!test_bit(ID, &motg->inputs) || test_bit(ID_A, &motg->inputs)) mod_timer(&motg->id_timer, ID_TIMER_FREQ); } static bool msm_chg_check_secondary_det(struct msm_otg *motg) { struct usb_phy *phy = &motg->phy; u32 chg_det; bool ret = false; switch (motg->pdata->phy_type) { case CI_45NM_INTEGRATED_PHY: chg_det = ulpi_read(phy, 0x34); ret = chg_det & (1 << 4); break; case SNPS_28NM_INTEGRATED_PHY: chg_det = ulpi_read(phy, 0x87); ret = chg_det & 1; break; default: break; } return ret; } static void msm_chg_enable_secondary_det(struct msm_otg *motg) { struct usb_phy *phy = &motg->phy; u32 chg_det; switch (motg->pdata->phy_type) { case CI_45NM_INTEGRATED_PHY: chg_det = ulpi_read(phy, 0x34); /* Turn off charger block */ chg_det |= ~(1 << 1); ulpi_write(phy, chg_det, 0x34); udelay(20); /* control chg block via ULPI */ chg_det &= ~(1 << 3); ulpi_write(phy, chg_det, 0x34); /* put it in host mode for enabling D- source */ chg_det &= ~(1 << 2); ulpi_write(phy, chg_det, 0x34); /* Turn on chg detect block */ chg_det &= ~(1 << 1); ulpi_write(phy, chg_det, 0x34); udelay(20); /* enable chg detection */ chg_det &= ~(1 << 0); ulpi_write(phy, chg_det, 0x34); break; case SNPS_28NM_INTEGRATED_PHY: /* * Configure DM as current source, DP as current sink * and enable battery charging comparators. 
		ulpi_write(phy, 0x8, 0x85);
		ulpi_write(phy, 0x2, 0x85);
		ulpi_write(phy, 0x1, 0x85);
		break;
	default:
		break;
	}
}

/*
 * Primary detection result (BC1.2 TVDPSRC_ON): true when D- rose above
 * VDAT_REF, i.e. a dedicated/charging port is attached. Also turns off
 * VDP_SRC on the 28nm PHY once the result is read.
 */
static bool msm_chg_check_primary_det(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	u32 chg_det;
	bool ret = false;

	switch (motg->pdata->phy_type) {
	case CI_45NM_INTEGRATED_PHY:
		chg_det = ulpi_read(phy, 0x34);
		ret = chg_det & (1 << 4);
		break;
	case SNPS_28NM_INTEGRATED_PHY:
		chg_det = ulpi_read(phy, 0x87);
		ret = chg_det & 1;
		/* Turn off VDP_SRC */
		ulpi_write(phy, 0x3, 0x86);
		msleep(20);
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Arm primary detection: drive VDP_SRC on D+, sink on D-, and enable the
 * charging comparators (BC1.2 TVDPSRC_ON phase).
 */
static void msm_chg_enable_primary_det(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	u32 chg_det;

	switch (motg->pdata->phy_type) {
	case CI_45NM_INTEGRATED_PHY:
		chg_det = ulpi_read(phy, 0x34);
		/* enable chg detection */
		chg_det &= ~(1 << 0);
		ulpi_write(phy, chg_det, 0x34);
		break;
	case SNPS_28NM_INTEGRATED_PHY:
		/*
		 * Configure DP as current source, DM as current sink
		 * and enable battery charging comparators.
		 */
		ulpi_write(phy, 0x2, 0x85);
		ulpi_write(phy, 0x1, 0x85);
		break;
	default:
		break;
	}
}

/*
 * Data Contact Detect status: true once the data lines have made contact
 * (BC1.2 DCD). Polled by msm_chg_detect_work.
 */
static bool msm_chg_check_dcd(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	u32 line_state;
	bool ret = false;

	switch (motg->pdata->phy_type) {
	case CI_45NM_INTEGRATED_PHY:
		line_state = ulpi_read(phy, 0x15);
		ret = !(line_state & 1);
		break;
	case SNPS_28NM_INTEGRATED_PHY:
		line_state = ulpi_read(phy, 0x87);
		ret = line_state & 2;
		break;
	default:
		break;
	}
	return ret;
}

/* Turn the DCD current source off again. */
static void msm_chg_disable_dcd(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	u32 chg_det;

	switch (motg->pdata->phy_type) {
	case CI_45NM_INTEGRATED_PHY:
		chg_det = ulpi_read(phy, 0x34);
		chg_det &= ~(1 << 5);
		ulpi_write(phy, chg_det, 0x34);
		break;
	case SNPS_28NM_INTEGRATED_PHY:
		ulpi_write(phy, 0x10, 0x86);
		break;
	default:
		break;
	}
}

/* Enable Data Contact Detect (IDP_SRC on D+). */
static void msm_chg_enable_dcd(struct msm_otg *motg)
{
	struct usb_phy *phy = &motg->phy;
	u32 chg_det;

	switch (motg->pdata->phy_type) {
	case CI_45NM_INTEGRATED_PHY:
		chg_det = ulpi_read(phy, 0x34);
		/* Turn on D+
current source */ chg_det |= (1 << 5); ulpi_write(phy, chg_det, 0x34); break; case SNPS_28NM_INTEGRATED_PHY: /* Data contact detection enable */ ulpi_write(phy, 0x10, 0x85); break; default: break; } } static void msm_chg_block_on(struct msm_otg *motg) { struct usb_phy *phy = &motg->phy; u32 func_ctrl, chg_det; /* put the controller in non-driving mode */ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL); func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK; func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL); switch (motg->pdata->phy_type) { case CI_45NM_INTEGRATED_PHY: chg_det = ulpi_read(phy, 0x34); /* control chg block via ULPI */ chg_det &= ~(1 << 3); ulpi_write(phy, chg_det, 0x34); /* Turn on chg detect block */ chg_det &= ~(1 << 1); ulpi_write(phy, chg_det, 0x34); udelay(20); break; case SNPS_28NM_INTEGRATED_PHY: ulpi_write(phy, 0x6, 0xC); /* Clear charger detecting control bits */ ulpi_write(phy, 0x1F, 0x86); /* Clear alt interrupt latch and enable bits */ ulpi_write(phy, 0x1F, 0x92); ulpi_write(phy, 0x1F, 0x95); udelay(100); break; default: break; } } static void msm_chg_block_off(struct msm_otg *motg) { struct usb_phy *phy = &motg->phy; u32 func_ctrl, chg_det; switch (motg->pdata->phy_type) { case CI_45NM_INTEGRATED_PHY: chg_det = ulpi_read(phy, 0x34); /* Turn off charger block */ chg_det |= ~(1 << 1); ulpi_write(phy, chg_det, 0x34); break; case SNPS_28NM_INTEGRATED_PHY: /* Clear charger detecting control bits */ ulpi_write(phy, 0x3F, 0x86); /* Clear alt interrupt latch and enable bits */ ulpi_write(phy, 0x1F, 0x92); ulpi_write(phy, 0x1F, 0x95); ulpi_write(phy, 0x6, 0xB); break; default: break; } /* put the controller in normal mode */ func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL); func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK; func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL; ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL); } static const char *chg_to_string(enum usb_chg_type chg_type) { switch (chg_type) { case USB_SDP_CHARGER: return "USB_SDP_CHARGER"; 
	case USB_DCP_CHARGER:		return "USB_DCP_CHARGER";
	case USB_CDP_CHARGER:		return "USB_CDP_CHARGER";
	case USB_ACA_A_CHARGER:		return "USB_ACA_A_CHARGER";
	case USB_ACA_B_CHARGER:		return "USB_ACA_B_CHARGER";
	case USB_ACA_C_CHARGER:		return "USB_ACA_C_CHARGER";
	case USB_ACA_DOCK_CHARGER:	return "USB_ACA_DOCK_CHARGER";
	case USB_PROPRIETARY_CHARGER:	return "USB_PROPRIETARY_CHARGER";
	default:			return "INVALID_CHARGER";
	}
}

/* BC1.2 detection timing, expressed in jiffies */
#define MSM_CHG_DCD_TIMEOUT		(750 * HZ/1000) /* 750 msec */
#define MSM_CHG_DCD_POLL_TIME		(50 * HZ/1000) /* 50 msec */
#define MSM_CHG_PRIMARY_DET_TIME	(50 * HZ/1000) /* TVDPSRC_ON */
#define MSM_CHG_SECONDARY_DET_TIME	(50 * HZ/1000) /* TVDMSRC_ON */

/*
 * Delayed-work state machine implementing BC1.2 charger detection:
 * DCD -> primary detection -> secondary detection -> DETECTED. Each pass
 * advances motg->chg_state and re-queues itself with the proper delay.
 */
static void msm_chg_detect_work(struct work_struct *w)
{
	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
	struct usb_otg *otg = motg->phy.otg;
	bool is_dcd = false, tmout, vout, is_aca;
	u32 line_state, dm_vlgc;
	unsigned long delay;

	USBH_INFO("%s: state:%s\n", __func__,
		chg_state_string(motg->chg_state));

	/* charger detection is meaningless while we act as a host */
#ifdef CONFIG_USB_OTG_HOST_CHG
	if (otg->phy->state >= OTG_STATE_A_IDLE &&
		cable_get_accessory_type() != DOCK_STATE_HOST_CHG_DOCK) {
#else
	if (otg->phy->state >= OTG_STATE_A_IDLE) {
#endif
		motg->chg_state = USB_CHG_STATE_UNDEFINED;
		USBH_INFO("%s: usb host, charger state:%s\n", __func__,
			chg_state_string(motg->chg_state));
		if (motg->connect_type != CONNECT_TYPE_NONE) {
			motg->connect_type = CONNECT_TYPE_NONE;
			queue_work(motg->usb_wq, &motg->notifier_work);
		}
		return;
	}

	switch (motg->chg_state) {
	case USB_CHG_STATE_UNDEFINED:
		msm_chg_block_on(motg);
		msm_chg_enable_dcd(motg);
		msm_chg_enable_aca_det(motg);
		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
		motg->dcd_time = 0;
		delay = MSM_CHG_DCD_POLL_TIME;
		break;
	case USB_CHG_STATE_WAIT_FOR_DCD:
		is_aca = msm_chg_aca_detect(motg);
		if (is_aca) {
			/*
			 * ID_A can be ACA dock too. continue
			 * primary detection after DCD.
			 */
			if (test_bit(ID_A, &motg->inputs)) {
				motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
			} else {
				/* ACA result is final: go to DETECTED now */
				delay = 0;
				break;
			}
		}
		is_dcd = msm_chg_check_dcd(motg);
		motg->dcd_time += MSM_CHG_DCD_POLL_TIME;
		tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT;
		if (is_dcd || tmout) {
			/* contact made (or gave up): start primary det */
			msm_chg_disable_dcd(motg);
			msm_chg_enable_primary_det(motg);
			delay = MSM_CHG_PRIMARY_DET_TIME;
			motg->chg_state = USB_CHG_STATE_DCD_DONE;
		} else {
			delay = MSM_CHG_DCD_POLL_TIME;
		}
		break;
	case USB_CHG_STATE_DCD_DONE:
		vout = msm_chg_check_primary_det(motg);
		line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS;
		dm_vlgc = line_state & PORTSC_LS_DM;
		USBH_INFO("line_state = %x\n",line_state);
		if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */
			if (test_bit(ID_A, &motg->inputs)) {
				motg->chg_type = USB_ACA_DOCK_CHARGER;
				motg->chg_state = USB_CHG_STATE_DETECTED;
				motg->connect_type = CONNECT_TYPE_UNKNOWN;
				delay = 0;
				break;
			}
			if (line_state) { /* DP > VLGC */
				motg->chg_type = USB_PROPRIETARY_CHARGER;
				motg->chg_state = USB_CHG_STATE_DETECTED;
				motg->connect_type = CONNECT_TYPE_AC;
				USBH_INFO("DP > VLGC\n");
				delay = 0;
			} else {
				/* charging port: distinguish DCP from CDP */
				msm_chg_enable_secondary_det(motg);
				delay = MSM_CHG_SECONDARY_DET_TIME;
				motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
			}
		} else { /* DM < VDAT_REF || DM > VLGC */
			if (test_bit(ID_A, &motg->inputs)) {
				motg->chg_type = USB_ACA_A_CHARGER;
				motg->chg_state = USB_CHG_STATE_DETECTED;
				motg->connect_type = CONNECT_TYPE_UNKNOWN;
				delay = 0;
				break;
			}
			if (line_state) { /* DP > VLGC or/and DM > VLGC */
				motg->chg_type = USB_PROPRIETARY_CHARGER;
				motg->connect_type = CONNECT_TYPE_AC;
				USBH_INFO("DP > VLGC or/and DM > VLGC\n");
			} else {
				motg->chg_type = USB_SDP_CHARGER;
				motg->connect_type = CONNECT_TYPE_UNKNOWN;
			}
			motg->chg_state = USB_CHG_STATE_DETECTED;
			delay = 0;
		}
		break;
	case USB_CHG_STATE_PRIMARY_DONE:
		vout = msm_chg_check_secondary_det(motg);
		if (vout) {
			motg->chg_type = USB_DCP_CHARGER;
			motg->connect_type = CONNECT_TYPE_AC;
		} else {
			motg->chg_type = USB_CDP_CHARGER;
			motg->connect_type = CONNECT_TYPE_USB;
		}
motg->chg_state = USB_CHG_STATE_SECONDARY_DONE; case USB_CHG_STATE_SECONDARY_DONE: motg->chg_state = USB_CHG_STATE_DETECTED; case USB_CHG_STATE_DETECTED: msm_otg_notify_chg_type(motg); msm_chg_block_off(motg); msm_chg_enable_aca_det(motg); /* * Spurious interrupt is seen after enabling ACA detection * due to which charger detection fails in case of PET. * Add delay of 100 microsec to avoid that. */ udelay(100); msm_chg_enable_aca_intr(motg); USBH_INFO("chg_type = %s\n", chg_to_string(motg->chg_type)); queue_work(system_nrt_wq, &motg->sm_work); queue_work(motg->usb_wq, &motg->notifier_work); return; default: return; } queue_delayed_work(system_nrt_wq, &motg->chg_work, delay); } /* * We support OTG, Peripheral only and Host only configurations. In case * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen * via Id pin status or user request (debugfs). Id/BSV interrupts are not * enabled when switch is controlled by user and default mode is supplied * by board file, which can be changed by userspace later. 
 */
/*
 * msm_otg_init_sm - seed the OTG state machine inputs (ID, B_SESS_VLD)
 * from the configured mode and control source (user / PHY OTGSC bits /
 * PMIC).  For PMIC control this blocks until the PMIC driver has
 * reported the initial VBUS state via pmic_vbus_init.
 */
static void msm_otg_init_sm(struct msm_otg *motg)
{
	struct msm_otg_platform_data *pdata = motg->pdata;
	u32 otgsc = readl(USB_OTGSC);

	switch (pdata->mode) {
	case USB_OTG:
		if (pdata->otg_control == OTG_USER_CONTROL) {
			/* Board-file default; userspace may override later
			 * through the debugfs "mode" file. */
			if (pdata->default_mode == USB_HOST) {
				clear_bit(ID, &motg->inputs);
			} else if (pdata->default_mode == USB_PERIPHERAL) {
				set_bit(ID, &motg->inputs);
				set_bit(B_SESS_VLD, &motg->inputs);
			} else {
				set_bit(ID, &motg->inputs);
				clear_bit(B_SESS_VLD, &motg->inputs);
			}
		} else if (pdata->otg_control == OTG_PHY_CONTROL) {
			/* ID/BSV come straight from the link OTGSC bits. */
			if (otgsc & OTGSC_ID) {
				set_bit(ID, &motg->inputs);
			} else {
				clear_bit(ID, &motg->inputs);
				/* Grounded ID: request the bus so the A-host
				 * path starts without user intervention. */
				set_bit(A_BUS_REQ, &motg->inputs);
			}
			if (otgsc & OTGSC_BSV)
				set_bit(B_SESS_VLD, &motg->inputs);
			else
				clear_bit(B_SESS_VLD, &motg->inputs);
		} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
			if (pdata->pmic_id_irq) {
				unsigned long flags;

				/* Sample the PMIC ID line with IRQs off so
				 * the reading can't race the ID interrupt. */
				local_irq_save(flags);
				if (irq_read_line(pdata->pmic_id_irq))
					set_bit(ID, &motg->inputs);
				else
					clear_bit(ID, &motg->inputs);
				local_irq_restore(flags);
			} else
				set_bit(ID, &motg->inputs);
			/*
			 * VBUS initial state is reported after PMIC
			 * driver initialization. Wait for it.
			 */
			wait_for_completion(&pmic_vbus_init);
		}
		break;
	case USB_HOST:
		clear_bit(ID, &motg->inputs);
		break;
	case USB_PERIPHERAL:
		set_bit(ID, &motg->inputs);
		if (pdata->otg_control == OTG_PHY_CONTROL) {
			if (otgsc & OTGSC_BSV)
				set_bit(B_SESS_VLD, &motg->inputs);
			else
				clear_bit(B_SESS_VLD, &motg->inputs);
		} else if (pdata->otg_control == OTG_PMIC_CONTROL) {
			/*
			 * VBUS initial state is reported after PMIC
			 * driver initialization. Wait for it.
			 */
			wait_for_completion(&pmic_vbus_init);
		}
		break;
	default:
		break;
	}
	/* Session already valid at boot: route the shared pad to USB
	 * (switch argument 0 selects USB; 1 selects UART below). */
	if (test_bit(B_SESS_VLD, &motg->inputs)) {
		if (motg->pdata->usb_uart_switch)
			motg->pdata->usb_uart_switch(0);
	}
}

/*
 * msm_otg_sm_work - the main OTG state machine, run from sm_work.
 * Serialised by smwork_sem; consumes the motg->inputs bits set by the
 * IRQ handler / PMIC callbacks and drives state transitions, VBUS,
 * host/peripheral start-stop and charger notifications.
 */
static void msm_otg_sm_work(struct work_struct *w)
{
	struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
	struct usb_otg *otg = motg->phy.otg;
	bool work = 0, srp_reqd;

	USBH_INFO("%s: state:%s bit:0x%08x\n", __func__,
			state_string(motg->phy.state), (unsigned) motg->inputs);
	if (!motg->phy.dev) {
		USB_ERR("%s: otg->dev was null. state: %d\n",
				__func__, motg->phy.state);
		return;
	}
	mutex_lock(&smwork_sem);
	/* Make sure the controller is out of LPM before touching it. */
	pm_runtime_resume(otg->phy->dev);
	pr_debug("%s work\n", otg_state_string(otg->phy->state));
	switch (otg->phy->state) {
	case OTG_STATE_UNDEFINED:
		dev_dbg(otg->dev, "OTG_STATE_UNDEFINED state\n");
		msm_otg_reset(otg->phy);
		msm_otg_init_sm(motg);
		psy = power_supply_get_by_name("usb");
		if (!psy)
			pr_err("couldn't get usb power supply\n");
		otg->phy->state = OTG_STATE_B_IDLE;
		/* Nothing attached: drop straight into low power mode. */
		if (!test_bit(B_SESS_VLD, &motg->inputs) &&
				test_bit(ID, &motg->inputs)) {
			pm_runtime_put_noidle(otg->phy->dev);
			pm_runtime_suspend(otg->phy->dev);
			break;
		}
		/* FALL THROUGH */
	case OTG_STATE_B_IDLE:
		dev_dbg(otg->dev, "OTG_STATE_B_IDLE state\n");
		if ((!test_bit(ID, &motg->inputs) ||
				test_bit(ID_A, &motg->inputs)) && otg->host) {
			/* ID grounded (or ACA ID_A): become the A-device. */
			USBH_INFO("!id || id_a\n");
			clear_bit(B_BUS_REQ, &motg->inputs);
			set_bit(A_BUS_REQ, &motg->inputs);
			otg->phy->state = OTG_STATE_A_IDLE;
#ifdef CONFIG_USB_OTG_HOST_CHG
			if (motg->connect_type != CONNECT_TYPE_NONE &&
					cable_get_accessory_type() != DOCK_STATE_HOST_CHG_DOCK &&
					motg->connect_type != CONNECT_TYPE_CLEAR) {
#else
			if (motg->connect_type != CONNECT_TYPE_NONE) {
#endif
				motg->connect_type = CONNECT_TYPE_NONE;
				queue_work(motg->usb_wq, &motg->notifier_work);
			}
			work = 1;
		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
			USBH_INFO("b_sess_vld\n");
			switch (motg->chg_state) {
			case USB_CHG_STATE_UNDEFINED:
				/* Run the first detection step synchronously. */
				msm_chg_detect_work(&motg->chg_work.work);
				break;
			case USB_CHG_STATE_DETECTED:
				switch
(motg->chg_type) {
				/* Charger type known: act on it. */
				case USB_DCP_CHARGER:
					/* Enable VDP_SRC */
					ulpi_write(otg->phy, 0x2, 0x85);
					/* fall through */
				case USB_PROPRIETARY_CHARGER:
					/* Dedicated/proprietary supply: draw
					 * max current and go to low power. */
					msm_otg_notify_charger(motg,
							IDEV_CHG_MAX);
					if (motg->reset_phy_before_lpm)
						msm_otg_reset(otg->phy);
					pm_runtime_put_noidle(otg->phy->dev);
					pm_runtime_suspend(otg->phy->dev);
					break;
				case USB_ACA_B_CHARGER:
					msm_otg_notify_charger(motg,
							IDEV_ACA_CHG_MAX);
					/*
					 * (ID_B --> ID_C) PHY_ALT interrupt can
					 * not be detected in LPM.
					 */
					break;
				case USB_CDP_CHARGER:
					/* CDP: full charge current AND enumerate. */
					msm_otg_notify_charger(motg,
							IDEV_CHG_MAX);
					msm_otg_start_peripheral(otg, 1);
					otg->phy->state
						= OTG_STATE_B_PERIPHERAL;
					break;
				case USB_ACA_C_CHARGER:
					msm_otg_notify_charger(motg,
							IDEV_ACA_CHG_MAX);
					msm_otg_start_peripheral(otg, 1);
					otg->phy->state
						= OTG_STATE_B_PERIPHERAL;
					break;
				case USB_SDP_CHARGER:
					/* Turn off VDP_SRC */
					ulpi_write(otg->phy, 0x3, 0x86);
					msm_otg_notify_charger(motg,
							IDEV_CHG_MIN);
					msm_otg_start_peripheral(otg, 1);
					otg->phy->state
						= OTG_STATE_B_PERIPHERAL;
					/* Re-check later whether this "SDP" is
					 * really a shorted-D+/D- AC adapter. */
					motg->ac_detect_count = 0;
					queue_delayed_work(system_nrt_wq,
						&motg->ac_detect_work, 2 * HZ);
					break;
				default:
					break;
				}
				break;
			default:
				break;
			}
		} else if (test_bit(B_BUS_REQ, &motg->inputs)) {
			/* No session but the B-device wants one: try SRP. */
			USBH_INFO("b_sess_end && b_bus_req\n");
			if (msm_otg_start_srp(otg) < 0) {
				clear_bit(B_BUS_REQ, &motg->inputs);
				work = 1;
				break;
			}
			otg->phy->state = OTG_STATE_B_SRP_INIT;
			msm_otg_start_timer(motg, TB_SRP_FAIL, B_SRP_FAIL);
			break;
		} else {
			/* Cable gone: reset charger detection and notify. */
			pr_debug("chg_work cancel");
			USBH_INFO("!b_sess_vld && id\n");
			cancel_delayed_work_sync(&motg->chg_work);
			motg->chg_state = USB_CHG_STATE_UNDEFINED;
			motg->chg_type = USB_INVALID_CHARGER;
			msm_otg_notify_charger(motg, 0);
			msm_otg_reset(otg->phy);
			if (motg->connect_type != CONNECT_TYPE_NONE) {
				motg->connect_type = CONNECT_TYPE_NONE;
				queue_work(motg->usb_wq, &motg->notifier_work);
			}
			pm_runtime_put_noidle(otg->phy->dev);
			pm_runtime_suspend(otg->phy->dev);
		}
		break;
	case OTG_STATE_B_SRP_INIT:
		if (!test_bit(ID, &motg->inputs) ||
				test_bit(ID_A, &motg->inputs) ||
				test_bit(ID_C, &motg->inputs)
				|| (test_bit(B_SESS_VLD, &motg->inputs) &&
					!test_bit(ID_B, &motg->inputs))) {
			USBH_INFO("!id || id_a/c || b_sess_vld+!id_b\n");
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_B_IDLE;
			/*
			 * clear VBUSVLDEXTSEL and VBUSVLDEXT register
			 * bits after SRP initiation.
			 */
			ulpi_write(otg->phy, 0x0, 0x98);
			work = 1;
		} else if (test_bit(B_SRP_FAIL, &motg->tmouts)) {
			/* SRP timer expired without a response. */
			USBH_INFO("b_srp_fail\n");
			pr_info("A-device did not respond to SRP\n");
			clear_bit(B_BUS_REQ, &motg->inputs);
			clear_bit(B_SRP_FAIL, &motg->tmouts);
			otg_send_event(OTG_EVENT_NO_RESP_FOR_SRP);
			ulpi_write(otg->phy, 0x0, 0x98);
			otg->phy->state = OTG_STATE_B_IDLE;
			motg->b_last_se0_sess = jiffies;
			work = 1;
		}
		break;
	case OTG_STATE_B_PERIPHERAL:
		dev_dbg(otg->dev, "OTG_STATE_B_PERIPHERAL state\n");
		if (!test_bit(ID, &motg->inputs) ||
				test_bit(ID_A, &motg->inputs) ||
				test_bit(ID_B, &motg->inputs) ||
				!test_bit(B_SESS_VLD, &motg->inputs)) {
			/* Session dropped or ID changed: tear down gadget. */
			if (motg->connect_type != CONNECT_TYPE_NONE) {
				motg->connect_type = CONNECT_TYPE_NONE;
				queue_work(motg->usb_wq, &motg->notifier_work);
			}
#if 0
			if (check_htc_mode_status() != NOT_ON_AUTOBOT) {
				htc_mode_enable(0);
				android_switch_default();
			}
#endif
			USBH_INFO("!id || id_a/b || !b_sess_vld\n");
			motg->chg_state = USB_CHG_STATE_UNDEFINED;
			motg->chg_type = USB_INVALID_CHARGER;
			msm_otg_notify_charger(motg, 0);
			/* Sample before stopping the peripheral: stopping may
			 * clear the gadget's otg_srp_reqd flag. */
			srp_reqd = otg->gadget->otg_srp_reqd;
			msm_otg_start_peripheral(otg, 0);
			if (test_bit(ID_B, &motg->inputs))
				clear_bit(ID_B, &motg->inputs);
			clear_bit(B_BUS_REQ, &motg->inputs);
			otg->phy->state = OTG_STATE_B_IDLE;
			motg->ac_detect_count = 0;
			cancel_delayed_work_sync(&motg->ac_detect_work);
			motg->b_last_se0_sess = jiffies;
			if (srp_reqd)
				msm_otg_start_timer(motg,
					TB_TST_SRP, B_TST_SRP);
			else
				work = 1;
		} else if (test_bit(B_BUS_REQ, &motg->inputs) &&
				otg->gadget->b_hnp_enable &&
				test_bit(A_BUS_SUSPEND, &motg->inputs)) {
			USBH_INFO("b_bus_req && b_hnp_en && a_bus_suspend\n");
			msm_otg_start_timer(motg, TB_ASE0_BRST, B_ASE0_BRST);
			/* D+ pullup should not be disconnected within 4msec
			 * after A device suspends the bus. Otherwise PET will
			 * fail the compliance test.
			 */
			udelay(1000);
			msm_otg_start_peripheral(otg, 0);
			otg->phy->state = OTG_STATE_B_WAIT_ACON;
			/*
			 * start HCD even before A-device enable
			 * pull-up to meet HNP timings.
			 */
			otg->host->is_b_host = 1;
			msm_otg_start_host(otg, 1);
		} else if (test_bit(A_BUS_SUSPEND, &motg->inputs) &&
				test_bit(B_SESS_VLD, &motg->inputs)) {
			pr_debug("a_bus_suspend && b_sess_vld\n");
			if (motg->caps & ALLOW_LPM_ON_DEV_SUSPEND) {
				pm_runtime_put_noidle(otg->phy->dev);
				pm_runtime_suspend(otg->phy->dev);
			}
		} else if (test_bit(ID_C, &motg->inputs)) {
			USBH_INFO("id_c\n");
			msm_otg_notify_charger(motg, IDEV_ACA_CHG_MAX);
		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
			/* redetect to China AC*/
			if (motg->chg_type == USB_DCP_CHARGER ||
				(motg->chg_type == USB_SDP_CHARGER &&
					USB_disabled)) {
				msm_otg_start_peripheral(otg, 0);
				otg->phy->state = OTG_STATE_B_IDLE;
				work = 1;
				motg->ac_detect_count = 0;
				cancel_delayed_work_sync(&motg->ac_detect_work);
			} else
				USBH_DEBUG("do nothing !!!\n");
		} else
			USBH_DEBUG("do nothing !!\n");
		break;
	case OTG_STATE_B_WAIT_ACON:
		if (!test_bit(ID, &motg->inputs) ||
				test_bit(ID_A, &motg->inputs) ||
				test_bit(ID_B, &motg->inputs) ||
				!test_bit(B_SESS_VLD, &motg->inputs)) {
			USBH_INFO("!id || id_a/b || !b_sess_vld\n");
			msm_otg_del_timer(motg);
			/*
			 * A-device is physically disconnected during
			 * HNP. Remove HCD.
			 */
			msm_otg_start_host(otg, 0);
			otg->host->is_b_host = 0;
			clear_bit(B_BUS_REQ, &motg->inputs);
			clear_bit(A_BUS_SUSPEND, &motg->inputs);
			motg->b_last_se0_sess = jiffies;
			otg->phy->state = OTG_STATE_B_IDLE;
			msm_otg_reset(otg->phy);
			work = 1;
		} else if (test_bit(A_CONN, &motg->inputs)) {
			USBH_INFO("a_conn\n");
			clear_bit(A_BUS_SUSPEND, &motg->inputs);
			otg->phy->state = OTG_STATE_B_HOST;
			/*
			 * PET disconnects D+ pullup after reset is generated
			 * by B device in B_HOST role which is not detected by
			 * B device. As workaorund , start timer of 300msec
			 * and stop timer if A device is enumerated else clear
			 * A_CONN.
		 */
			msm_otg_start_timer(motg, TB_TST_CONFIG,
					B_TST_CONFIG);
		} else if (test_bit(B_ASE0_BRST, &motg->tmouts)) {
			/* HNP handshake timed out: fall back to B_IDLE. */
			USBH_INFO("b_ase0_brst_tmout\n");
			pr_info("B HNP fail:No response from A device\n");
			msm_otg_start_host(otg, 0);
			msm_otg_reset(otg->phy);
			otg->host->is_b_host = 0;
			clear_bit(B_ASE0_BRST, &motg->tmouts);
			clear_bit(A_BUS_SUSPEND, &motg->inputs);
			clear_bit(B_BUS_REQ, &motg->inputs);
			otg_send_event(OTG_EVENT_HNP_FAILED);
			otg->phy->state = OTG_STATE_B_IDLE;
			work = 1;
		} else if (test_bit(ID_C, &motg->inputs)) {
			msm_otg_notify_charger(motg, IDEV_ACA_CHG_MAX);
		}
		break;
	case OTG_STATE_B_HOST:
		if (!test_bit(B_BUS_REQ, &motg->inputs) ||
				!test_bit(A_CONN, &motg->inputs) ||
				!test_bit(B_SESS_VLD, &motg->inputs)) {
			USBH_INFO("!b_bus_req || !a_conn || !b_sess_vld\n");
			clear_bit(A_CONN, &motg->inputs);
			clear_bit(B_BUS_REQ, &motg->inputs);
			msm_otg_start_host(otg, 0);
			otg->host->is_b_host = 0;
			otg->phy->state = OTG_STATE_B_IDLE;
			msm_otg_reset(otg->phy);
			work = 1;
		} else if (test_bit(ID_C, &motg->inputs)) {
			msm_otg_notify_charger(motg, IDEV_ACA_CHG_MAX);
		}
		break;
	case OTG_STATE_A_IDLE:
		otg->default_a = 1;
		if (test_bit(ID, &motg->inputs) &&
				!test_bit(ID_A, &motg->inputs)) {
			/* ID floated again: return to B-device role. */
			USBH_INFO("id && !id_a\n");
			otg->default_a = 0;
			clear_bit(A_BUS_DROP, &motg->inputs);
			otg->phy->state = OTG_STATE_B_IDLE;
			del_timer_sync(&motg->id_timer);
			msm_otg_link_reset(motg);
			msm_chg_enable_aca_intr(motg);
			msm_otg_notify_charger(motg, 0);
			work = 1;
		} else if (!test_bit(A_BUS_DROP, &motg->inputs) &&
				(test_bit(A_SRP_DET, &motg->inputs) ||
				 test_bit(A_BUS_REQ, &motg->inputs))) {
			USBH_INFO("!a_bus_drop && (a_srp_det || a_bus_req)\n");
			clear_bit(A_SRP_DET, &motg->inputs);
			/* Disable SRP detection */
			writel_relaxed((readl_relaxed(USB_OTGSC) &
					~OTGSC_INTSTS_MASK) &
					~OTGSC_DPIE, USB_OTGSC);
			otg->phy->state = OTG_STATE_A_WAIT_VRISE;
			/* VBUS should not be supplied before end of SRP pulse
			 * generated by PET, if not complaince test fail.
			 */
			usleep_range(10000, 12000);
			/* ACA: ID_A: Stop charging untill enumeration */
			if (test_bit(ID_A, &motg->inputs)) {
				msm_otg_notify_charger(motg, 0);
			} else {
#ifdef CONFIG_USB_OTG_HOST_CHG
				if (htc_otg_vbus == 1 && cable_get_accessory_type() == DOCK_STATE_HOST_CHG_DOCK)
					msm_hsusb_vbus_power(motg, 0);
				else
					msm_hsusb_vbus_power(motg, 1);
#else
				msm_hsusb_vbus_power(motg, 1);
#endif
			}
			msm_otg_start_timer(motg, TA_WAIT_VRISE, A_WAIT_VRISE);
		} else {
			USBH_INFO("No session requested\n");
			clear_bit(A_BUS_DROP, &motg->inputs);
			if (test_bit(ID_A, &motg->inputs)) {
				msm_otg_notify_charger(motg,
						IDEV_ACA_CHG_MAX);
			} else if (!test_bit(ID, &motg->inputs)) {
				msm_otg_notify_charger(motg, 0);
				/*
				 * A-device is not providing power on VBUS.
				 * Enable SRP detection.
				 */
				writel_relaxed(0x13, USB_USBMODE);
				writel_relaxed((readl_relaxed(USB_OTGSC) &
						~OTGSC_INTSTS_MASK) |
						OTGSC_DPIE, USB_OTGSC);
				mb();
			}
		}
		break;
	case OTG_STATE_A_WAIT_VRISE:
		if ((test_bit(ID, &motg->inputs) &&
				!test_bit(ID_A, &motg->inputs)) ||
				test_bit(A_BUS_DROP, &motg->inputs) ||
				test_bit(A_WAIT_VRISE, &motg->tmouts)) {
			USBH_INFO("id || a_bus_drop || a_wait_vrise_tmout\n");
			clear_bit(A_BUS_REQ, &motg->inputs);
			msm_otg_del_timer(motg);
			msm_hsusb_vbus_power(motg, 0);
			otg->phy->state = OTG_STATE_A_WAIT_VFALL;
			msm_otg_start_timer(motg, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (test_bit(A_VBUS_VLD, &motg->inputs)) {
			/* VBUS reached valid level: start the HCD and wait
			 * for a B-device connection. */
			USBH_INFO("a_vbus_vld\n");
			otg->phy->state = OTG_STATE_A_WAIT_BCON;
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(motg, TA_WAIT_BCON,
					A_WAIT_BCON);
			msm_otg_start_host(otg, 1);
			msm_chg_enable_aca_det(motg);
			msm_chg_disable_aca_intr(motg);
			mod_timer(&motg->id_timer, ID_TIMER_FREQ);
			if (msm_chg_check_aca_intr(motg))
				work = 1;
		}
		break;
	case OTG_STATE_A_WAIT_BCON:
		if ((test_bit(ID, &motg->inputs) &&
				!test_bit(ID_A, &motg->inputs)) ||
				test_bit(A_BUS_DROP, &motg->inputs) ||
				test_bit(A_WAIT_BCON, &motg->tmouts)) {
			USBH_INFO("(id && id_a/b/c) || a_bus_drop ||"
					"a_wait_bcon_tmout\n");
			if (test_bit(A_WAIT_BCON, &motg->tmouts)) {
				pr_info("Device No Response\n");
				otg_send_event(OTG_EVENT_DEV_CONN_TMOUT);
			}
			msm_otg_del_timer(motg);
			clear_bit(A_BUS_REQ, &motg->inputs);
			clear_bit(B_CONN, &motg->inputs);
			msm_otg_start_host(otg, 0);
			/*
			 * ACA: ID_A with NO accessory, just the A plug is
			 * attached to ACA: Use IDCHG_MAX for charging
			 */
			if (test_bit(ID_A, &motg->inputs))
				msm_otg_notify_charger(motg, IDEV_CHG_MIN);
			else
				msm_hsusb_vbus_power(motg, 0);
			otg->phy->state = OTG_STATE_A_WAIT_VFALL;
			msm_otg_start_timer(motg, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &motg->inputs)) {
			/* VBUS collapsed (overcurrent?): error state. */
			USBH_INFO("!a_vbus_vld\n");
			clear_bit(B_CONN, &motg->inputs);
			msm_otg_del_timer(motg);
			msm_otg_start_host(otg, 0);
			otg->phy->state = OTG_STATE_A_VBUS_ERR;
			msm_otg_reset(otg->phy);
		} else if (test_bit(ID_A, &motg->inputs)) {
			/* ACA supplies VBUS; ours must stay off. */
			msm_hsusb_vbus_power(motg, 0);
		} else if (!test_bit(A_BUS_REQ, &motg->inputs)) {
			/*
			 * If TA_WAIT_BCON is infinite, we don;t
			 * turn off VBUS. Enter low power mode.
			 */
			if (TA_WAIT_BCON < 0)
				pm_runtime_put_sync(otg->phy->dev);
		} else if (!test_bit(ID, &motg->inputs)) {
#ifdef CONFIG_USB_OTG_HOST_CHG
			if (cable_get_accessory_type() != DOCK_STATE_HOST_CHG_DOCK)
				msm_hsusb_vbus_power(motg, 1);
#endif
		}
		/* HTC: honour the userspace "disable USB" switch even in
		 * host mode. */
		if (USB_disabled && stop_usb_host != STOP_HOST_STATE) {
			USBH_INFO("[USB_disabled] disable USB Host function\n");
			stop_usb_host = TRY_STOP_HOST_STATE;
			re_enable_host = TRY_ENABLE_HOST_STATE;
			msm_otg_start_host(otg, 0);
		} else if (!USB_disabled &&
				re_enable_host == TRY_ENABLE_HOST_STATE) {
			USBH_INFO("[USB_disabled] re-enable USB Host function\n");
			re_enable_host = DEFAULT_STATE;
			stop_usb_host = DEFAULT_STATE;
			msm_otg_start_host(otg, 1);
		}
		break;
	case OTG_STATE_A_HOST:
		dev_dbg(otg->dev, "OTG_STATE_A_HOST state\n");
		if ((test_bit(ID, &motg->inputs) &&
				!test_bit(ID_A, &motg->inputs)) ||
				test_bit(A_BUS_DROP, &motg->inputs)) {
			USBH_INFO("id_a/b/c || a_bus_drop\n");
			clear_bit(B_CONN, &motg->inputs);
			clear_bit(A_BUS_REQ, &motg->inputs);
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_A_WAIT_VFALL;
msm_otg_start_host(otg, 0);
			if (!test_bit(ID_A, &motg->inputs))
				msm_hsusb_vbus_power(motg, 0);
			msm_otg_start_timer(motg, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &motg->inputs)) {
			USBH_INFO("!a_vbus_vld\n");
			clear_bit(B_CONN, &motg->inputs);
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_A_VBUS_ERR;
			msm_otg_start_host(otg, 0);
			msm_otg_reset(otg->phy);
		} else if (!test_bit(A_BUS_REQ, &motg->inputs)) {
			/*
			 * a_bus_req is de-asserted when root hub is
			 * suspended or HNP is in progress.
			 */
			USBH_INFO("!a_bus_req\n");
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_A_SUSPEND;
			if (otg->host->b_hnp_enable)
				msm_otg_start_timer(motg, TA_AIDL_BDIS,
						A_AIDL_BDIS);
			else
				pm_runtime_put_sync(otg->phy->dev);
		} else if (!test_bit(B_CONN, &motg->inputs)) {
			/* B-device disconnected: wait for a new connection. */
			USBH_INFO("!b_conn\n");
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_A_WAIT_BCON;
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(motg, TA_WAIT_BCON,
					A_WAIT_BCON);
			if (msm_chg_check_aca_intr(motg))
				work = 1;
		} else if (test_bit(ID_A, &motg->inputs)) {
			msm_otg_del_timer(motg);
			msm_hsusb_vbus_power(motg, 0);
			if (motg->chg_type == USB_ACA_DOCK_CHARGER)
				msm_otg_notify_charger(motg,
						IDEV_ACA_CHG_MAX);
			else
				msm_otg_notify_charger(motg,
						IDEV_CHG_MIN - motg->mA_port);
		} else if (!test_bit(ID, &motg->inputs)) {
			motg->chg_state = USB_CHG_STATE_UNDEFINED;
			motg->chg_type = USB_INVALID_CHARGER;
#ifdef CONFIG_USB_OTG_HOST_CHG
			if (htc_otg_vbus == 1 && cable_get_accessory_type() == DOCK_STATE_HOST_CHG_DOCK)
				msm_hsusb_vbus_power(motg, 0);
			else {
				msm_otg_notify_charger(motg, 0);
				msm_hsusb_vbus_power(motg, 1);
			}
#else
			msm_otg_notify_charger(motg, 0);
			msm_hsusb_vbus_power(motg, 1);
#endif
			if (USB_disabled) {
				USBH_INFO("[USB_disabled] disable USB Host function\n");
				re_enable_host = TRY_ENABLE_HOST_STATE;
				stop_usb_host = TRY_STOP_HOST_STATE;
				msm_otg_start_host(otg, 0);
			}
		}
		break;
	case OTG_STATE_A_SUSPEND:
		if ((test_bit(ID, &motg->inputs) &&
				!test_bit(ID_A, &motg->inputs)) ||
				test_bit(A_BUS_DROP, &motg->inputs) ||
				test_bit(A_AIDL_BDIS, &motg->tmouts)) {
			/* NOTE(review): this log string lacks a '\n' between
			 * the concatenated halves; cosmetic only. */
			USBH_INFO("id_a/b/c || a_bus_drop ||"
					"a_aidl_bdis_tmout\n");
			msm_otg_del_timer(motg);
			clear_bit(B_CONN, &motg->inputs);
			otg->phy->state = OTG_STATE_A_WAIT_VFALL;
			msm_otg_start_host(otg, 0);
			msm_otg_reset(otg->phy);
			if (!test_bit(ID_A, &motg->inputs))
				msm_hsusb_vbus_power(motg, 0);
			msm_otg_start_timer(motg, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &motg->inputs)) {
			USBH_INFO("!a_vbus_vld\n");
			msm_otg_del_timer(motg);
			clear_bit(B_CONN, &motg->inputs);
			otg->phy->state = OTG_STATE_A_VBUS_ERR;
			msm_otg_start_host(otg, 0);
			msm_otg_reset(otg->phy);
		} else if (!test_bit(B_CONN, &motg->inputs) &&
				otg->host->b_hnp_enable) {
			/* HNP: hand the host role to the B-device. */
			USBH_INFO("!b_conn && b_hnp_enable");
			otg->phy->state = OTG_STATE_A_PERIPHERAL;
			msm_otg_host_hnp_enable(otg, 1);
			otg->gadget->is_a_peripheral = 1;
			msm_otg_start_peripheral(otg, 1);
		} else if (!test_bit(B_CONN, &motg->inputs) &&
				!otg->host->b_hnp_enable) {
			USBH_INFO("!b_conn && !b_hnp_enable");
			/*
			 * bus request is dropped during suspend.
			 * acquire again for next device.
			 */
			set_bit(A_BUS_REQ, &motg->inputs);
			otg->phy->state = OTG_STATE_A_WAIT_BCON;
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(motg, TA_WAIT_BCON,
					A_WAIT_BCON);
			if (!USB_disabled &&
					re_enable_host == TRY_ENABLE_HOST_STATE) {
				USBH_INFO("[USB_disabled] re-enable USB Host function\n");
				re_enable_host = DEFAULT_STATE;
				stop_usb_host = DEFAULT_STATE;
				msm_otg_start_host(otg, 1);
			}
		} else if (test_bit(ID_A, &motg->inputs)) {
			msm_hsusb_vbus_power(motg, 0);
			msm_otg_notify_charger(motg,
					IDEV_CHG_MIN - motg->mA_port);
		} else if (!test_bit(ID, &motg->inputs)) {
			msm_otg_notify_charger(motg, 0);
			msm_hsusb_vbus_power(motg, 1);
		}
		break;
	case OTG_STATE_A_PERIPHERAL:
		if ((test_bit(ID, &motg->inputs) &&
				!test_bit(ID_A, &motg->inputs)) ||
				test_bit(A_BUS_DROP, &motg->inputs)) {
			USBH_INFO("id _f/b/c || a_bus_drop\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_A_WAIT_VFALL;
			msm_otg_start_peripheral(otg, 0);
			otg->gadget->is_a_peripheral = 0;
			msm_otg_start_host(otg, 0);
			msm_otg_reset(otg->phy);
			if (!test_bit(ID_A, &motg->inputs))
				msm_hsusb_vbus_power(motg, 0);
			msm_otg_start_timer(motg, TA_WAIT_VFALL, A_WAIT_VFALL);
		} else if (!test_bit(A_VBUS_VLD, &motg->inputs)) {
			USBH_INFO("!a_vbus_vld\n");
			/* Clear BIDL_ADIS timer */
			msm_otg_del_timer(motg);
			otg->phy->state = OTG_STATE_A_VBUS_ERR;
			msm_otg_start_peripheral(otg, 0);
			otg->gadget->is_a_peripheral = 0;
			msm_otg_start_host(otg, 0);
		} else if (test_bit(A_BIDL_ADIS, &motg->tmouts)) {
			/* B-device idle too long: reclaim the host role. */
			USBH_INFO("a_bidl_adis_tmout\n");
			msm_otg_start_peripheral(otg, 0);
			otg->gadget->is_a_peripheral = 0;
			otg->phy->state = OTG_STATE_A_WAIT_BCON;
			set_bit(A_BUS_REQ, &motg->inputs);
			msm_otg_host_hnp_enable(otg, 0);
			if (TA_WAIT_BCON > 0)
				msm_otg_start_timer(motg, TA_WAIT_BCON,
					A_WAIT_BCON);
		} else if (test_bit(ID_A, &motg->inputs)) {
			msm_hsusb_vbus_power(motg, 0);
			msm_otg_notify_charger(motg,
					IDEV_CHG_MIN - motg->mA_port);
		} else if (!test_bit(ID, &motg->inputs)) {
			USBH_INFO("!id\n");
			msm_otg_notify_charger(motg, 0);
			msm_hsusb_vbus_power(motg, 1);
		}
		break;
	case OTG_STATE_A_WAIT_VFALL:
		if (test_bit(A_WAIT_VFALL, &motg->tmouts)) {
			clear_bit(A_VBUS_VLD, &motg->inputs);
			otg->phy->state = OTG_STATE_A_IDLE;
			work = 1;
		}
		break;
	case OTG_STATE_A_VBUS_ERR:
		if ((test_bit(ID, &motg->inputs) &&
				!test_bit(ID_A, &motg->inputs)) ||
				test_bit(A_BUS_DROP, &motg->inputs) ||
				test_bit(A_CLR_ERR, &motg->inputs)) {
			otg->phy->state = OTG_STATE_A_WAIT_VFALL;
			if (!test_bit(ID_A, &motg->inputs))
				msm_hsusb_vbus_power(motg, 0);
			msm_otg_start_timer(motg, TA_WAIT_VFALL, A_WAIT_VFALL);
			motg->chg_state = USB_CHG_STATE_UNDEFINED;
			motg->chg_type = USB_INVALID_CHARGER;
			msm_otg_notify_charger(motg, 0);
		}
		break;
	default:
		break;
	}
	mutex_unlock(&smwork_sem);
	/* An input changed mid-transition: run the state machine again. */
	if (work)
		queue_work(system_nrt_wq, &motg->sm_work);
}

/*
 * msm_otg_irq - link/OTG interrupt handler.  Translates OTGSC/USBSTS
 * interrupt causes into motg->inputs bits and schedules sm_work.  If
 * the controller is in LPM it only flags the async interrupt and
 * requests a runtime resume.
 */
static irqreturn_t msm_otg_irq(int irq, void *data)
{
	struct msm_otg *motg = data;
	struct usb_otg *otg = motg->phy.otg;
	u32 otgsc = 0, usbsts, pc;
	bool work = 0;
	irqreturn_t ret = IRQ_HANDLED;

	if (atomic_read(&motg->in_lpm)) {
		pr_debug("OTG IRQ: in LPM\n");
		/* Registers are not accessible in LPM; defer to resume. */
		disable_irq_nosync(irq);
		motg->async_int = 1;
		if (atomic_read(&motg->pm_suspended))
			motg->sm_work_pending = true;
		else
			pm_request_resume(otg->phy->dev);
		return IRQ_HANDLED;
	}

	usbsts = readl(USB_USBSTS);
	otgsc = readl(USB_OTGSC);

	if (!(otgsc & OTG_OTGSTS_MASK) && !(usbsts & OTG_USBSTS_MASK))
		return IRQ_NONE;

	if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
		if (otgsc & OTGSC_ID) {
			pr_debug("Id set\n");
			set_bit(ID, &motg->inputs);
		} else {
			pr_debug("Id clear\n");
			/*
			 * Assert a_bus_req to supply power on
			 * VBUS when Micro/Mini-A cable is connected
			 * with out user intervention.
		 */
			set_bit(A_BUS_REQ, &motg->inputs);
			clear_bit(ID, &motg->inputs);
			msm_chg_enable_aca_det(motg);
		}
		/* Write back otgsc to ack the interrupt bits. */
		writel_relaxed(otgsc, USB_OTGSC);
		work = 1;
	} else if (otgsc & OTGSC_DPIS) {
		/* Data-pulse (SRP) seen from the B-device. */
		pr_debug("DPIS detected\n");
		writel_relaxed(otgsc, USB_OTGSC);
		set_bit(A_SRP_DET, &motg->inputs);
		set_bit(A_BUS_REQ, &motg->inputs);
		work = 1;
	} else if (otgsc & OTGSC_BSVIS) {
		/* BSV change: ack only; PMIC callback owns B_SESS_VLD. */
		writel_relaxed(otgsc, USB_OTGSC);
	} else if (usbsts & STS_PCI) {
		pc = readl_relaxed(USB_PORTSC);
		pr_debug("portsc = %x\n", pc);
		ret = IRQ_NONE;
		/*
		 * HCD Acks PCI interrupt. We use this to switch
		 * between different OTG states.
		 */
		work = 1;
		switch (otg->phy->state) {
		case OTG_STATE_A_SUSPEND:
			if (otg->host->b_hnp_enable && (pc & PORTSC_CSC) &&
					!(pc & PORTSC_CCS)) {
				pr_debug("B_CONN clear\n");
				clear_bit(B_CONN, &motg->inputs);
				msm_otg_del_timer(motg);
			}
			break;
		case OTG_STATE_A_PERIPHERAL:
			/*
			 * A-peripheral observed activity on bus.
			 * clear A_BIDL_ADIS timer.
			 */
			msm_otg_del_timer(motg);
			work = 0;
			break;
		case OTG_STATE_B_WAIT_ACON:
			if ((pc & PORTSC_CSC) && (pc & PORTSC_CCS)) {
				pr_debug("A_CONN set\n");
				set_bit(A_CONN, &motg->inputs);
				/* Clear ASE0_BRST timer */
				msm_otg_del_timer(motg);
			}
			break;
		case OTG_STATE_B_HOST:
			if ((pc & PORTSC_CSC) && !(pc & PORTSC_CCS)) {
				pr_debug("A_CONN clear\n");
				clear_bit(A_CONN, &motg->inputs);
				msm_otg_del_timer(motg);
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (TA_WAIT_BCON < 0)
				set_bit(A_BUS_REQ, &motg->inputs);
			/* fall through */
		default:
			work = 0;
			break;
		}
	} else if (usbsts & STS_URI) {
		ret = IRQ_NONE;
		switch (otg->phy->state) {
		case OTG_STATE_A_PERIPHERAL:
			/*
			 * A-peripheral observed activity on bus.
			 * clear A_BIDL_ADIS timer.
			 */
			msm_otg_del_timer(motg);
			work = 0;
			break;
		default:
			work = 0;
			break;
		}
	} else if (usbsts & STS_SLI) {
		/* Bus suspended by the host side. */
		ret = IRQ_NONE;
		work = 0;
		switch (otg->phy->state) {
		case OTG_STATE_B_PERIPHERAL:
			if (otg->gadget->b_hnp_enable) {
				set_bit(A_BUS_SUSPEND, &motg->inputs);
				set_bit(B_BUS_REQ, &motg->inputs);
				work = 1;
			}
			break;
		case OTG_STATE_A_PERIPHERAL:
			msm_otg_start_timer(motg, TA_BIDL_ADIS,
					A_BIDL_ADIS);
			break;
		default:
			break;
		}
	} else if ((usbsts & PHY_ALT_INT)) {
		/* ACA RID state changed (ID_B <-> ID_C etc.). */
		writel_relaxed(PHY_ALT_INT, USB_USBSTS);
		if (msm_chg_check_aca_intr(motg))
			work = 1;
		ret = IRQ_HANDLED;
	}
	if (work)
		queue_work(system_nrt_wq, &motg->sm_work);

	return ret;
}

/* The dedicated 9V detection GPIO will be high if VBUS is in and over 6V.
 * Since D+/D- status is not involved, there is no timing issue between
 * D+/D- and VBUS. 9V AC should NOT be found here.
 */
/*
 * ac_detect_expired_work - late re-classification of an "SDP" cable.
 * Some AC adapters short D+/D- but enumerate nothing; poll the line
 * state up to 5 times (2s apart) and, if D+/D- are not both high, keep
 * polling; if they are, reclassify as a DCP/AC charger and stop the
 * peripheral.  Dock types are special-cased when accessory detection
 * is available.
 */
static void ac_detect_expired_work(struct work_struct *w)
{
	u32 delay = 0, line_state;
	struct msm_otg *motg = the_msm_otg;
	struct usb_phy *usb_phy = &motg->phy;

	line_state = readl(USB_PORTSC) & PORTSC_LS;
	/* NOTE(review): the log shifts line_state by 10 presumably to show
	 * the raw PORTSC bit positions — confirm intent. */
	USBH_INFO("%s: count = %d, connect_type = %d, line_state = %x\n",
			__func__, motg->ac_detect_count,
			motg->connect_type,line_state << 10);

	/* Stop once enumeration confirmed a real USB host or after the
	 * retry budget is exhausted. */
	if (motg->connect_type == CONNECT_TYPE_USB || motg->ac_detect_count >= 5)
		return;

	/* detect shorted D+/D-, indicating AC power */
	if (line_state != PORTSC_LS) {
#ifdef CONFIG_CABLE_DETECT_ACCESSORY
		if (cable_get_accessory_type() == DOCK_STATE_CAR
			||cable_get_accessory_type() == DOCK_STATE_AUDIO_DOCK) {
			USBH_INFO("car/audio dock mode charger\n");
			motg->chg_type = USB_DCP_CHARGER;
			motg->chg_state = USB_CHG_STATE_DETECTED;
			motg->connect_type = CONNECT_TYPE_AC;
			motg->ac_detect_count = 0;
			msm_otg_start_peripheral(usb_phy->otg, 0);
			usb_phy->state = OTG_STATE_B_IDLE;
			queue_work(system_nrt_wq, &motg->sm_work);
			queue_work(motg->usb_wq, &motg->notifier_work);
			return;
		}
		{
			int dock_result = check_three_pogo_dock();
			if (dock_result == 2) {
				/* Pogo dock reporting an AC supply. */
				USBH_INFO("three pogo dock AC type\n");
				motg->chg_type = USB_DCP_CHARGER;
				motg->chg_state =
USB_CHG_STATE_DETECTED;
				motg->connect_type = CONNECT_TYPE_AC;
				motg->ac_detect_count = 0;
				msm_otg_start_peripheral(usb_phy->otg, 0);
				usb_phy->state = OTG_STATE_B_IDLE;
				queue_work(system_nrt_wq, &motg->sm_work);
				queue_work(motg->usb_wq, &motg->notifier_work);
				return;
			} else if (dock_result == 1) {
				/* Pogo dock on a data-capable (USB) port. */
				USBH_INFO("three pogo dock USB type\n");
				motg->chg_type = USB_INVALID_CHARGER;
				motg->chg_state = USB_CHG_STATE_DETECTED;
				motg->connect_type = CONNECT_TYPE_NONE;
				motg->ac_detect_count = 0;
				msm_otg_start_peripheral(usb_phy->otg, 0);
				usb_phy->state = OTG_STATE_B_IDLE;
				queue_work(system_nrt_wq, &motg->sm_work);
				queue_work(motg->usb_wq, &motg->notifier_work);
				return;
			}
		}
#endif
		/* Not shorted (yet): retry in 2s up to the count limit. */
		if (motg->ac_detect_count++ < 5)
			delay = 2 * HZ;
		queue_delayed_work(system_nrt_wq, &motg->ac_detect_work,
					delay);
	} else {
		/* D+/D- both high: treat as a dedicated AC charger. */
		USBH_INFO("AC charger\n");
		motg->chg_type = USB_DCP_CHARGER;
		motg->chg_state = USB_CHG_STATE_DETECTED;
		motg->connect_type = CONNECT_TYPE_AC;
		motg->ac_detect_count = 0;
		msm_otg_start_peripheral(usb_phy->otg, 0);
		usb_phy->state = OTG_STATE_B_IDLE;
		queue_work(system_nrt_wq, &motg->sm_work);
		queue_work(motg->usb_wq, &motg->notifier_work);
	}
}

#ifndef CONFIG_CABLE_DETECT_8X60
/* Forward a VBUS notification into the HTC cable-detect layer. */
static void htc_vbus_notify(int online)
{
	cable_detection_vbus_irq_handler();
}
#endif

/* Returns the last VBUS state reported by msm_otg_set_vbus_state(). */
int msm_otg_get_vbus_state(void)
{
	return htc_otg_vbus;
}

/*
 * msm_otg_set_vbus_state - PMIC callback for VBUS (B_SESS_VLD) changes.
 * Updates the session-valid input bit, flips the USB/UART pad switch,
 * and kicks sm_work.  The very first call only completes pmic_vbus_init
 * so msm_otg_init_sm() can proceed.
 */
void msm_otg_set_vbus_state(int online)
{
	static bool init;	/* first-callback latch for pmic_vbus_init */
	struct msm_otg *motg = the_msm_otg;
	struct usb_otg *otg = motg->phy.otg;

	USBH_INFO("%s: %d\n", __func__, online);
	htc_otg_vbus = online;
#ifdef CONFIG_USB_OTG_HOST_CHG
	/* In A Host Mode, ignore received BSV interrupts */
	if (online && otg->phy->state >= OTG_STATE_A_IDLE
		&& cable_get_accessory_type() == DOCK_STATE_HOST_CHG_DOCK)
		return;
#else
	if (online && otg->phy->state >= OTG_STATE_A_IDLE)
		return;
#endif
	if (online) {
		pr_debug("PMIC: BSV set\n");
		set_bit(B_SESS_VLD, &motg->inputs);
		/* VBUS interrupt will be triggered while HOST
		 * 5V power turn on */
		/*USB*/
		if (motg->pdata->usb_uart_switch)
			motg->pdata->usb_uart_switch(0);
		/* NOTE(review): literal 0 here looks like it should be
		 * CONNECT_TYPE_NONE — verify the enum value. */
		if (motg->connect_type == 0) {
			motg->connect_type = CONNECT_TYPE_NOTIFY;
			queue_work(motg->usb_wq, &motg->notifier_work);
		}
	} else {
		pr_debug("PMIC: BSV clear\n");
		clear_bit(B_SESS_VLD, &motg->inputs);
		/*UART*/
		if (motg->pdata->usb_uart_switch)
			motg->pdata->usb_uart_switch(1);
	}

	if (!init) {
		init = true;
		complete(&pmic_vbus_init);
		pr_debug("PMIC: BSV init complete\n");
		return;
	}

	/* Keep the system awake long enough to finish detection. */
	wake_lock_timeout(&motg->cable_detect_wlock, 3 * HZ);
	if (atomic_read(&motg->pm_suspended))
		motg->sm_work_pending = true;
	else
		queue_work(system_nrt_wq, &motg->sm_work);
}

/*
 * msm_otg_set_id_state - external (cable-detect) ID pin notification.
 * id != 0 means ID floating (B-device role); id == 0 means grounded.
 */
void msm_otg_set_id_state(int id)
{
	struct msm_otg *motg = the_msm_otg;

	if (id) {
		pr_debug("PMIC: ID set\n");
		set_bit(ID, &motg->inputs);
	} else {
		pr_debug("PMIC: ID clear\n");
		clear_bit(ID, &motg->inputs);
	}
	if (motg->phy.state != OTG_STATE_UNDEFINED) {
		/* Hold a wake_lock so that it will not sleep in detection */
		wake_lock_timeout(&motg->cable_detect_wlock, 3 * HZ);
		schedule_work(&motg->sm_work);
	}
}

/* Cable-detect hook: a host cable plugged in grounds ID. */
static void usb_host_cable_detect(bool cable_in)
{
	if (cable_in)
		msm_otg_set_id_state(0);
	else
		msm_otg_set_id_state(1);
}

/*
 * msm_otg_set_disable_usb - userspace "disable USB" switch.  Re-enabling
 * also resets charger detection so the cable is re-classified.
 */
void msm_otg_set_disable_usb(int disable_usb)
{
	struct msm_otg *motg = the_msm_otg;

	USB_disabled = disable_usb;
	if (!disable_usb)
		motg->chg_state = USB_CHG_STATE_UNDEFINED;
	queue_work(system_nrt_wq, &motg->sm_work);
}

/*
 * msm_pmic_id_status_w - debounced PMIC ID-line sampler (runs 5ms after
 * the ID IRQ).  Reads the line with IRQs disabled and updates the ID
 * input bit; schedules sm_work only on an actual change.
 */
static void msm_pmic_id_status_w(struct work_struct *w)
{
	struct msm_otg *motg = container_of(w, struct msm_otg,
						pmic_id_status_work.work);
	int work = 0;
	unsigned long flags;

	local_irq_save(flags);
	if (irq_read_line(motg->pdata->pmic_id_irq)) {
		if (!test_and_set_bit(ID, &motg->inputs)) {
			pr_debug("PMIC: ID set\n");
			work = 1;
		}
	} else {
		if (test_and_clear_bit(ID, &motg->inputs)) {
			pr_debug("PMIC: ID clear\n");
			set_bit(A_BUS_REQ, &motg->inputs);
			work = 1;
		}
	}

	if (work && (motg->phy.state != OTG_STATE_UNDEFINED)) {
		if (atomic_read(&motg->pm_suspended))
			motg->sm_work_pending = true;
		else
			queue_work(system_nrt_wq, &motg->sm_work);
	}
	local_irq_restore(flags);
}
#define MSM_PMIC_ID_STATUS_DELAY 5 /* 5msec */ static irqreturn_t msm_pmic_id_irq(int irq, void *data) { struct msm_otg *motg = data; if (!aca_id_turned_on) /*schedule delayed work for 5msec for ID line state to settle*/ queue_delayed_work(system_nrt_wq, &motg->pmic_id_status_work, msecs_to_jiffies(MSM_PMIC_ID_STATUS_DELAY)); return IRQ_HANDLED; } static int msm_otg_mode_show(struct seq_file *s, void *unused) { struct msm_otg *motg = s->private; struct usb_phy *phy = &motg->phy; switch (phy->state) { case OTG_STATE_A_HOST: seq_printf(s, "host\n"); break; case OTG_STATE_B_PERIPHERAL: seq_printf(s, "peripheral\n"); break; default: seq_printf(s, "none\n"); break; } return 0; } static int msm_otg_mode_open(struct inode *inode, struct file *file) { return single_open(file, msm_otg_mode_show, inode->i_private); } static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct msm_otg *motg = s->private; char buf[16]; struct usb_phy *phy = &motg->phy; int status = count; enum usb_mode_type req_mode; memset(buf, 0x00, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) { status = -EFAULT; goto out; } if (!strncmp(buf, "host", 4)) { req_mode = USB_HOST; } else if (!strncmp(buf, "peripheral", 10)) { req_mode = USB_PERIPHERAL; } else if (!strncmp(buf, "none", 4)) { req_mode = USB_NONE; } else { status = -EINVAL; goto out; } USB_INFO("%s: %s\n", __func__, (req_mode == USB_HOST)?"host" :(req_mode == USB_PERIPHERAL)?"peripheral":"none"); switch (req_mode) { case USB_NONE: switch (phy->state) { case OTG_STATE_A_HOST: case OTG_STATE_B_PERIPHERAL: set_bit(ID, &motg->inputs); clear_bit(B_SESS_VLD, &motg->inputs); break; default: goto out; } break; case USB_PERIPHERAL: switch (phy->state) { case OTG_STATE_B_IDLE: case OTG_STATE_A_HOST: set_bit(ID, &motg->inputs); set_bit(B_SESS_VLD, &motg->inputs); break; default: goto out; } break; case USB_HOST: switch 
(phy->state) { case OTG_STATE_B_IDLE: case OTG_STATE_B_PERIPHERAL: clear_bit(ID, &motg->inputs); break; default: goto out; } break; default: goto out; } pm_runtime_resume(phy->dev); queue_work(system_nrt_wq, &motg->sm_work); out: return status; } const struct file_operations msm_otg_mode_fops = { .open = msm_otg_mode_open, .read = seq_read, .write = msm_otg_mode_write, .llseek = seq_lseek, .release = single_release, }; static int msm_otg_show_otg_state(struct seq_file *s, void *unused) { struct msm_otg *motg = s->private; struct usb_phy *phy = &motg->phy; seq_printf(s, "%s\n", otg_state_string(phy->state)); return 0; } static int msm_otg_otg_state_open(struct inode *inode, struct file *file) { return single_open(file, msm_otg_show_otg_state, inode->i_private); } const struct file_operations msm_otg_state_fops = { .open = msm_otg_otg_state_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int msm_otg_show_chg_type(struct seq_file *s, void *unused) { struct msm_otg *motg = s->private; seq_printf(s, "%s\n", chg_to_string(motg->chg_type)); return 0; } static int msm_otg_chg_open(struct inode *inode, struct file *file) { return single_open(file, msm_otg_show_chg_type, inode->i_private); } const struct file_operations msm_otg_chg_fops = { .open = msm_otg_chg_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int msm_otg_aca_show(struct seq_file *s, void *unused) { if (debug_aca_enabled) seq_printf(s, "enabled\n"); else seq_printf(s, "disabled\n"); return 0; } static int msm_otg_aca_open(struct inode *inode, struct file *file) { return single_open(file, msm_otg_aca_show, inode->i_private); } static ssize_t msm_otg_aca_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { char buf[8]; memset(buf, 0x00, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "enable", 6)) debug_aca_enabled = true; else debug_aca_enabled = 
false; return count; } const struct file_operations msm_otg_aca_fops = { .open = msm_otg_aca_open, .read = seq_read, .write = msm_otg_aca_write, .llseek = seq_lseek, .release = single_release, }; static int msm_otg_bus_show(struct seq_file *s, void *unused) { if (debug_bus_voting_enabled) seq_printf(s, "enabled\n"); else seq_printf(s, "disabled\n"); return 0; } static int msm_otg_bus_open(struct inode *inode, struct file *file) { return single_open(file, msm_otg_bus_show, inode->i_private); } static ssize_t msm_otg_bus_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { char buf[8]; int ret; struct seq_file *s = file->private_data; struct msm_otg *motg = s->private; memset(buf, 0x00, sizeof(buf)); if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "enable", 6)) { /* Do not vote here. Let OTG statemachine decide when to vote */ debug_bus_voting_enabled = true; } else { debug_bus_voting_enabled = false; if (motg->bus_perf_client) { ret = msm_bus_scale_client_update_request( motg->bus_perf_client, 0); if (ret) dev_err(motg->phy.dev, "%s: Failed to devote " "for bus bw %d\n", __func__, ret); } } return count; } const struct file_operations msm_otg_bus_fops = { .open = msm_otg_bus_open, .read = seq_read, .write = msm_otg_bus_write, .llseek = seq_lseek, .release = single_release, }; static struct dentry *msm_otg_dbg_root; static int msm_otg_debugfs_init(struct msm_otg *motg) { struct dentry *msm_otg_dentry; msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL); if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root)) return -ENODEV; if (motg->pdata->mode == USB_OTG && motg->pdata->otg_control == OTG_USER_CONTROL) { msm_otg_dentry = debugfs_create_file("mode", S_IRUGO | S_IWUSR, msm_otg_dbg_root, motg, &msm_otg_mode_fops); if (!msm_otg_dentry) { debugfs_remove(msm_otg_dbg_root); msm_otg_dbg_root = NULL; return -ENODEV; } } msm_otg_dentry = debugfs_create_file("chg_type", S_IRUGO, 
msm_otg_dbg_root, motg, &msm_otg_chg_fops); if (!msm_otg_dentry) { debugfs_remove_recursive(msm_otg_dbg_root); return -ENODEV; } msm_otg_dentry = debugfs_create_file("aca", S_IRUGO | S_IWUSR, msm_otg_dbg_root, motg, &msm_otg_aca_fops); if (!msm_otg_dentry) { debugfs_remove_recursive(msm_otg_dbg_root); return -ENODEV; } msm_otg_dentry = debugfs_create_file("bus_voting", S_IRUGO | S_IWUSR, msm_otg_dbg_root, motg, &msm_otg_bus_fops); if (!msm_otg_dentry) { debugfs_remove_recursive(msm_otg_dbg_root); return -ENODEV; } msm_otg_dentry = debugfs_create_file("otg_state", S_IRUGO, msm_otg_dbg_root, motg, &msm_otg_state_fops); if (!msm_otg_dentry) { debugfs_remove_recursive(msm_otg_dbg_root); return -ENODEV; } return 0; } static void msm_otg_debugfs_cleanup(void) { debugfs_remove_recursive(msm_otg_dbg_root); } static struct t_usb_host_status_notifier usb_host_status_notifier = { .name = "usb_host", .func = usb_host_cable_detect, }; static u64 msm_otg_dma_mask = DMA_BIT_MASK(64); static struct platform_device *msm_otg_add_pdev( struct platform_device *ofdev, const char *name) { struct platform_device *pdev; const struct resource *res = ofdev->resource; unsigned int num = ofdev->num_resources; int retval; pdev = platform_device_alloc(name, -1); if (!pdev) { retval = -ENOMEM; goto error; } pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &msm_otg_dma_mask; if (num) { retval = platform_device_add_resources(pdev, res, num); if (retval) goto error; } retval = platform_device_add(pdev); if (retval) goto error; return pdev; error: platform_device_put(pdev); return ERR_PTR(retval); } static int msm_otg_setup_devices(struct platform_device *ofdev, enum usb_mode_type mode, bool init) { const char *gadget_name = "msm_hsusb"; const char *host_name = "msm_hsusb_host"; static struct platform_device *gadget_pdev; static struct platform_device *host_pdev; int retval = 0; if (!init) { if (gadget_pdev) platform_device_unregister(gadget_pdev); if (host_pdev) 
platform_device_unregister(host_pdev); return 0; } switch (mode) { case USB_OTG: /* fall through */ case USB_PERIPHERAL: gadget_pdev = msm_otg_add_pdev(ofdev, gadget_name); if (IS_ERR(gadget_pdev)) { retval = PTR_ERR(gadget_pdev); break; } if (mode == USB_PERIPHERAL) break; /* fall through */ case USB_HOST: host_pdev = msm_otg_add_pdev(ofdev, host_name); if (IS_ERR(host_pdev)) { retval = PTR_ERR(host_pdev); if (mode == USB_OTG) platform_device_unregister(gadget_pdev); } break; default: break; } return retval; } struct msm_otg_platform_data *msm_otg_dt_to_pdata(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct msm_otg_platform_data *pdata; int len = 0; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { pr_err("unable to allocate platform data\n"); return NULL; } of_get_property(node, "qcom,hsusb-otg-phy-init-seq", &len); if (len) { pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); if (!pdata->phy_init_seq) return NULL; of_property_read_u32_array(node, "qcom,hsusb-otg-phy-init-seq", pdata->phy_init_seq, len/sizeof(*pdata->phy_init_seq)); } of_property_read_u32(node, "qcom,hsusb-otg-power-budget", &pdata->power_budget); of_property_read_u32(node, "qcom,hsusb-otg-mode", &pdata->mode); of_property_read_u32(node, "qcom,hsusb-otg-otg-control", &pdata->otg_control); of_property_read_u32(node, "qcom,hsusb-otg-default-mode", &pdata->default_mode); of_property_read_u32(node, "qcom,hsusb-otg-phy-type", &pdata->phy_type); of_property_read_u32(node, "qcom,hsusb-otg-pmic-id-irq", &pdata->pmic_id_irq); return pdata; } static const char *event_string(enum usb_otg_event event) { switch (event) { case OTG_EVENT_DEV_CONN_TMOUT: return "DEV_CONN_TMOUT"; case OTG_EVENT_NO_RESP_FOR_HNP_ENABLE: return "NO_RESP_FOR_HNP_ENABLE"; case OTG_EVENT_HUB_NOT_SUPPORTED: return "HUB_NOT_SUPPORTED"; case OTG_EVENT_DEV_NOT_SUPPORTED: return "DEV_NOT_SUPPORTED"; case OTG_EVENT_HNP_FAILED: return "HNP_FAILED"; case 
OTG_EVENT_NO_RESP_FOR_SRP: return "NO_RESP_FOR_SRP"; case OTG_EVENT_INSUFFICIENT_POWER: return "DEV_NOT_SUPPORTED"; default: return "UNDEFINED"; } } static int msm_otg_send_event(struct usb_phy *phy, enum usb_otg_event event) { char module_name[16]; char udev_event[128]; char *envp[] = { module_name, udev_event, NULL }; int ret; /* we only broadcast customize event now*/ switch (event) { case OTG_EVENT_INSUFFICIENT_POWER: case OTG_EVENT_DEV_NOT_SUPPORTED: #if 1 USBH_DEBUG("sending %s event\n", event_string(event)); #endif snprintf(module_name, 16, "MODULE=%s", DRIVER_NAME); snprintf(udev_event, 128, "EVENT=%s", event_string(event)); ret = kobject_uevent_env(&phy->dev->kobj, KOBJ_CHANGE, envp); #if 1 if (ret < 0) USBH_ERR("uevent sending failed with ret = %d\n", ret); #endif break; default: return 0; break; } return ret; } static int msm_otg_send_event2(struct usb_otg *otg, enum usb_otg_event event){ return msm_otg_send_event(otg->phy,event); } static int __init msm_otg_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; struct msm_otg *motg; struct usb_phy *phy; struct msm_otg_platform_data *pdata; dev_info(&pdev->dev, "msm_otg probe\n"); if (pdev->dev.of_node) { dev_dbg(&pdev->dev, "device tree enabled\n"); pdata = msm_otg_dt_to_pdata(pdev); if (!pdata) return -ENOMEM; ret = msm_otg_setup_devices(pdev, pdata->mode, true); if (ret) { dev_err(&pdev->dev, "devices setup failed\n"); return ret; } } else if (!pdev->dev.platform_data) { dev_err(&pdev->dev, "No platform data given. 
Bailing out\n"); return -ENODEV; } else { pdata = pdev->dev.platform_data; } motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL); if (!motg) { dev_err(&pdev->dev, "unable to allocate msm_otg\n"); return -ENOMEM; } motg->phy.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL); if (!motg->phy.otg) { dev_err(&pdev->dev, "unable to allocate usb_otg\n"); ret = -ENOMEM; goto free_motg; } the_msm_otg = motg; motg->pdata = pdata; motg->connect_type_ready = 0; phy = &motg->phy; phy->dev = &pdev->dev; motg->reset_phy_before_lpm = pdata->reset_phy_before_lpm; /* * ACA ID_GND threshold range is overlapped with OTG ID_FLOAT. Hence * PHY treat ACA ID_GND as float and no interrupt is generated. But * PMIC can detect ACA ID_GND and generate an interrupt. */ if (aca_enabled() && motg->pdata->otg_control != OTG_PMIC_CONTROL) { dev_err(&pdev->dev, "ACA can not be enabled without PMIC\n"); ret = -EINVAL; goto free_otg; } /* initialize reset counter */ motg->reset_counter = 0; /* Some targets don't support PHY clock. */ motg->phy_reset_clk = clk_get(&pdev->dev, "phy_clk"); if (IS_ERR(motg->phy_reset_clk)) dev_err(&pdev->dev, "failed to get phy_clk\n"); /* * Targets on which link uses asynchronous reset methodology, * free running clock is not required during the reset. */ motg->clk = clk_get(&pdev->dev, "alt_core_clk"); if (IS_ERR(motg->clk)) dev_dbg(&pdev->dev, "alt_core_clk is not present\n"); else clk_set_rate(motg->clk, 60000000); /* * USB Core is running its protocol engine based on CORE CLK, * CORE CLK must be running at >55Mhz for correct HSUSB * operation and USB core cannot tolerate frequency changes on * CORE CLK. 
For such USB cores, vote for maximum clk frequency * on pclk source */ motg->core_clk = clk_get(&pdev->dev, "core_clk"); if (IS_ERR(motg->core_clk)) { motg->core_clk = NULL; dev_err(&pdev->dev, "failed to get core_clk\n"); ret = PTR_ERR(motg->core_clk); goto put_clk; } clk_set_rate(motg->core_clk, INT_MAX); motg->pclk = clk_get(&pdev->dev, "iface_clk"); if (IS_ERR(motg->pclk)) { dev_err(&pdev->dev, "failed to get iface_clk\n"); ret = PTR_ERR(motg->pclk); goto put_core_clk; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get platform resource mem\n"); ret = -ENODEV; goto put_pclk; } motg->regs = ioremap(res->start, resource_size(res)); if (!motg->regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto put_pclk; } dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs); motg->irq = platform_get_irq(pdev, 0); if (!motg->irq) { dev_err(&pdev->dev, "platform_get_irq failed\n"); ret = -ENODEV; goto free_regs; } motg->xo_handle = msm_xo_get(MSM_XO_TCXO_D0, "usb"); if (IS_ERR(motg->xo_handle)) { dev_err(&pdev->dev, "%s not able to get the handle " "to vote for TCXO D0 buffer\n", __func__); ret = PTR_ERR(motg->xo_handle); goto free_regs; } ret = msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_ON); if (ret) { dev_err(&pdev->dev, "%s failed to vote for TCXO " "D0 buffer%d\n", __func__, ret); goto free_xo_handle; } clk_prepare_enable(motg->pclk); motg->vdd_type = VDDCX_CORNER; hsusb_vddcx = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig"); if (IS_ERR(hsusb_vddcx)) { hsusb_vddcx = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX"); if (IS_ERR(hsusb_vddcx)) { dev_err(motg->phy.dev, "unable to get hsusb vddcx\n"); ret = PTR_ERR(hsusb_vddcx); goto devote_xo_handle; } motg->vdd_type = VDDCX; } ret = msm_hsusb_config_vddcx(1); if (ret) { dev_err(&pdev->dev, "hsusb vddcx configuration failed\n"); goto devote_xo_handle; } ret = regulator_enable(hsusb_vddcx); if (ret) { dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n"); 
goto free_config_vddcx; } ret = msm_hsusb_ldo_init(motg, 1); if (ret) { dev_err(&pdev->dev, "hsusb vreg configuration failed\n"); goto free_hsusb_vddcx; } if (pdata->mhl_enable) { mhl_usb_hs_switch = devm_regulator_get(motg->phy.dev, "mhl_usb_hs_switch"); if (IS_ERR(mhl_usb_hs_switch)) { dev_err(&pdev->dev, "Unable to get mhl_usb_hs_switch\n"); ret = PTR_ERR(mhl_usb_hs_switch); goto free_ldo_init; } } ret = msm_hsusb_ldo_enable(motg, 1); if (ret) { dev_err(&pdev->dev, "hsusb vreg enable failed\n"); goto free_ldo_init; } clk_prepare_enable(motg->core_clk); writel(0, USB_USBINTR); writel(0, USB_OTGSC); /* Ensure that above STOREs are completed before enabling interrupts */ mb(); motg->usb_wq = create_singlethread_workqueue("msm_hsusb"); if (motg->usb_wq == 0) { USB_ERR("fail to create workqueue\n"); goto free_ldo_init; } wake_lock_init(&motg->wlock, WAKE_LOCK_SUSPEND, "msm_otg"); wake_lock_init(&motg->cable_detect_wlock, WAKE_LOCK_SUSPEND, "msm_usb_cable"); msm_otg_init_timer(motg); INIT_WORK(&motg->sm_work, msm_otg_sm_work); INIT_WORK(&motg->usb_disable_work, usb_disable_work); INIT_WORK(&motg->notifier_work, send_usb_connect_notify); INIT_DELAYED_WORK(&motg->ac_detect_work, ac_detect_expired_work); INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work); INIT_DELAYED_WORK(&motg->pmic_id_status_work, msm_pmic_id_status_w); setup_timer(&motg->id_timer, msm_otg_id_timer_func, (unsigned long) motg); motg->ac_detect_count = 0; ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED, "msm_otg", motg); if (ret) { dev_err(&pdev->dev, "request irq failed\n"); goto destroy_wlock; } phy->init = msm_otg_reset; phy->set_power = msm_otg_set_power; phy->set_suspend = msm_otg_set_suspend; phy->notify_usb_attached = msm_otg_notify_usb_attached; phy->notify_usb_disabled = msm_otg_notify_usb_disabled; phy->io_ops = &msm_otg_io_ops; phy->send_event = msm_otg_send_event; phy->otg->send_event = msm_otg_send_event2; phy->otg->phy = &motg->phy; phy->otg->set_host = msm_otg_set_host; 
phy->otg->set_peripheral = msm_otg_set_peripheral; phy->otg->start_hnp = msm_otg_start_hnp; phy->otg->start_srp = msm_otg_start_srp; ret = usb_set_transceiver(&motg->phy); if (ret) { dev_err(&pdev->dev, "usb_set_transceiver failed\n"); goto free_irq; } if (motg->pdata->mode == USB_OTG && motg->pdata->otg_control == OTG_PMIC_CONTROL) { if (motg->pdata->pmic_id_irq) { ret = request_irq(motg->pdata->pmic_id_irq, msm_pmic_id_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "msm_otg", motg); if (ret) { dev_err(&pdev->dev, "request irq failed for PMIC ID\n"); goto remove_phy; } } else { dev_dbg(&pdev->dev, "PMIC IRQ for ID notifications doesn't exist. Maybe monitor id pin by GPIO"); } } msm_hsusb_mhl_switch_enable(motg, 1); platform_set_drvdata(pdev, motg); device_init_wakeup(&pdev->dev, 1); motg->mA_port = IUNIT; ret = msm_otg_debugfs_init(motg); if (ret) dev_dbg(&pdev->dev, "mode debugfs file is" "not available\n"); if (motg->pdata->otg_control == OTG_PMIC_CONTROL) { #ifdef CONFIG_CABLE_DETECT_8X60 pm8921_charger_register_vbus_sn(&msm_otg_set_vbus_state); #else pm8921_charger_register_vbus_sn(&htc_vbus_notify); #endif } usb_host_detect_register_notifier(&usb_host_status_notifier); if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY) { #if 0 if (motg->pdata->otg_control == OTG_PMIC_CONTROL && (!(motg->pdata->mode == USB_OTG) || motg->pdata->pmic_id_irq)) motg->caps = ALLOW_PHY_RETENTION; #endif if (motg->pdata->otg_control == OTG_PMIC_CONTROL) motg->caps = ALLOW_PHY_RETENTION; if (motg->pdata->otg_control == OTG_PHY_CONTROL) motg->caps = ALLOW_PHY_RETENTION; if (is_msm_otg_support_power_collapse(motg)) { motg->caps |= ALLOW_PHY_POWER_COLLAPSE; USBH_DEBUG("%s support power collapse\n", __func__); } } if (motg->pdata->enable_lpm_on_dev_suspend) motg->caps |= ALLOW_LPM_ON_DEV_SUSPEND; pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); if (motg->pdata->bus_scale_table) { motg->bus_perf_client = msm_bus_scale_register_client(motg->pdata->bus_scale_table); if 
(!motg->bus_perf_client) dev_err(motg->phy.dev, "%s: Failed to register BUS " "scaling client!!\n", __func__); else debug_bus_voting_enabled = true; } return 0; remove_phy: usb_set_transceiver(NULL); free_irq: free_irq(motg->irq, motg); destroy_wlock: wake_lock_destroy(&motg->wlock); wake_lock_destroy(&motg->cable_detect_wlock); clk_disable_unprepare(motg->core_clk); msm_hsusb_ldo_enable(motg, 0); free_ldo_init: msm_hsusb_ldo_init(motg, 0); free_hsusb_vddcx: regulator_disable(hsusb_vddcx); free_config_vddcx: regulator_set_voltage(hsusb_vddcx, vdd_val[motg->vdd_type][VDD_NONE], vdd_val[motg->vdd_type][VDD_MAX]); devote_xo_handle: clk_disable_unprepare(motg->pclk); msm_xo_mode_vote(motg->xo_handle, MSM_XO_MODE_OFF); free_xo_handle: msm_xo_put(motg->xo_handle); free_regs: iounmap(motg->regs); put_pclk: clk_put(motg->pclk); put_core_clk: clk_put(motg->core_clk); put_clk: if (!IS_ERR(motg->clk)) clk_put(motg->clk); if (!IS_ERR(motg->phy_reset_clk)) clk_put(motg->phy_reset_clk); free_otg: kfree(motg->phy.otg); free_motg: kfree(motg); return ret; } static int __devexit msm_otg_remove(struct platform_device *pdev) { struct msm_otg *motg = platform_get_drvdata(pdev); struct usb_otg *otg = motg->phy.otg; int cnt = 0; if (otg->host || otg->gadget) return -EBUSY; USB_INFO("%s\n", __func__); if (pdev->dev.of_node) msm_otg_setup_devices(pdev, motg->pdata->mode, false); if (motg->pdata->otg_control == OTG_PMIC_CONTROL) pm8921_charger_unregister_vbus_sn(0); msm_otg_debugfs_cleanup(); cancel_delayed_work_sync(&motg->chg_work); cancel_delayed_work_sync(&motg->pmic_id_status_work); cancel_work_sync(&motg->sm_work); pm_runtime_resume(&pdev->dev); device_init_wakeup(&pdev->dev, 0); pm_runtime_disable(&pdev->dev); wake_lock_destroy(&motg->wlock); wake_lock_destroy(&motg->cable_detect_wlock); msm_hsusb_mhl_switch_enable(motg, 0); if (motg->pdata->pmic_id_irq) free_irq(motg->pdata->pmic_id_irq, motg); usb_set_transceiver(NULL); free_irq(motg->irq, motg); /* * Put PHY in low power mode. 
*/ ulpi_read(otg->phy, 0x14); ulpi_write(otg->phy, 0x08, 0x09); writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC); while (cnt < PHY_SUSPEND_TIMEOUT_USEC) { if (readl(USB_PORTSC) & PORTSC_PHCD) break; udelay(1); cnt++; } if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) dev_err(otg->phy->dev, "Unable to suspend PHY\n"); clk_disable_unprepare(motg->pclk); clk_disable_unprepare(motg->core_clk); msm_xo_put(motg->xo_handle); msm_hsusb_ldo_enable(motg, 0); msm_hsusb_ldo_init(motg, 0); regulator_disable(hsusb_vddcx); regulator_set_voltage(hsusb_vddcx, vdd_val[motg->vdd_type][VDD_NONE], vdd_val[motg->vdd_type][VDD_MAX]); iounmap(motg->regs); pm_runtime_set_suspended(&pdev->dev); if (!IS_ERR(motg->phy_reset_clk)) clk_put(motg->phy_reset_clk); clk_put(motg->pclk); if (!IS_ERR(motg->clk)) clk_put(motg->clk); clk_put(motg->core_clk); if (motg->bus_perf_client) msm_bus_scale_unregister_client(motg->bus_perf_client); kfree(motg->phy.otg); kfree(motg); return 0; } #ifdef CONFIG_PM_RUNTIME static int msm_otg_runtime_idle(struct device *dev) { struct msm_otg *motg = dev_get_drvdata(dev); struct usb_phy *phy = &motg->phy; dev_dbg(dev, "OTG runtime idle\n"); if (phy->state == OTG_STATE_UNDEFINED) return -EAGAIN; else return 0; } static int msm_otg_runtime_suspend(struct device *dev) { struct msm_otg *motg = dev_get_drvdata(dev); dev_dbg(dev, "OTG runtime suspend\n"); return msm_otg_suspend(motg); } static int msm_otg_runtime_resume(struct device *dev) { struct msm_otg *motg = dev_get_drvdata(dev); dev_dbg(dev, "OTG runtime resume\n"); pm_runtime_get_noresume(dev); return msm_otg_resume(motg); } #endif #ifdef CONFIG_PM_SLEEP static int msm_otg_pm_suspend(struct device *dev) { int ret = 0; struct msm_otg *motg = dev_get_drvdata(dev); dev_dbg(dev, "OTG PM suspend\n"); atomic_set(&motg->pm_suspended, 1); ret = msm_otg_suspend(motg); if (ret) atomic_set(&motg->pm_suspended, 0); return ret; } static int msm_otg_pm_resume(struct device *dev) { int ret = 0; struct msm_otg *motg = dev_get_drvdata(dev); 
dev_dbg(dev, "OTG PM resume\n"); atomic_set(&motg->pm_suspended, 0); if (motg->sm_work_pending) { motg->sm_work_pending = false; pm_runtime_get_noresume(dev); ret = msm_otg_resume(motg); /* Update runtime PM status */ pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); queue_work(system_nrt_wq, &motg->sm_work); } return ret; } #endif #ifdef CONFIG_PM static const struct dev_pm_ops msm_otg_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume) SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume, msm_otg_runtime_idle) }; #endif static struct of_device_id msm_otg_dt_match[] = { { .compatible = "qcom,hsusb-otg", }, {} }; static struct platform_driver msm_otg_driver = { .remove = __devexit_p(msm_otg_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &msm_otg_dev_pm_ops, #endif .of_match_table = msm_otg_dt_match, }, }; static int __init msm_otg_init(void) { return platform_driver_probe(&msm_otg_driver, msm_otg_probe); } static void __exit msm_otg_exit(void) { platform_driver_unregister(&msm_otg_driver); } module_init(msm_otg_init); module_exit(msm_otg_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MSM USB transceiver driver");
gpl-2.0
ftCommunity/ft-TXT
board/knobloch/TXT/board-support/ti-linux/sound/firewire/fireworks/fireworks_proc.c
57
6893
/* * fireworks_proc.c - a part of driver for Fireworks based devices * * Copyright (c) 2009-2010 Clemens Ladisch * Copyright (c) 2013-2014 Takashi Sakamoto * * Licensed under the terms of the GNU General Public License, version 2. */ #include "./fireworks.h" static inline const char* get_phys_name(struct snd_efw_phys_grp *grp, bool input) { const char *const ch_type[] = { "Analog", "S/PDIF", "ADAT", "S/PDIF or ADAT", "Mirroring", "Headphones", "I2S", "Guitar", "Pirzo Guitar", "Guitar String", }; if (grp->type < ARRAY_SIZE(ch_type)) return ch_type[grp->type]; else if (input) return "Input"; else return "Output"; } static void proc_read_hwinfo(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_efw *efw = entry->private_data; unsigned short i; struct snd_efw_hwinfo *hwinfo; hwinfo = kmalloc(sizeof(struct snd_efw_hwinfo), GFP_KERNEL); if (hwinfo == NULL) return; if (snd_efw_command_get_hwinfo(efw, hwinfo) < 0) goto end; snd_iprintf(buffer, "guid_hi: 0x%X\n", hwinfo->guid_hi); snd_iprintf(buffer, "guid_lo: 0x%X\n", hwinfo->guid_lo); snd_iprintf(buffer, "type: 0x%X\n", hwinfo->type); snd_iprintf(buffer, "version: 0x%X\n", hwinfo->version); snd_iprintf(buffer, "vendor_name: %s\n", hwinfo->vendor_name); snd_iprintf(buffer, "model_name: %s\n", hwinfo->model_name); snd_iprintf(buffer, "dsp_version: 0x%X\n", hwinfo->dsp_version); snd_iprintf(buffer, "arm_version: 0x%X\n", hwinfo->arm_version); snd_iprintf(buffer, "fpga_version: 0x%X\n", hwinfo->fpga_version); snd_iprintf(buffer, "flags: 0x%X\n", hwinfo->flags); snd_iprintf(buffer, "max_sample_rate: 0x%X\n", hwinfo->max_sample_rate); snd_iprintf(buffer, "min_sample_rate: 0x%X\n", hwinfo->min_sample_rate); snd_iprintf(buffer, "supported_clock: 0x%X\n", hwinfo->supported_clocks); snd_iprintf(buffer, "phys out: 0x%X\n", hwinfo->phys_out); snd_iprintf(buffer, "phys in: 0x%X\n", hwinfo->phys_in); snd_iprintf(buffer, "phys in grps: 0x%X\n", hwinfo->phys_in_grp_count); for (i = 0; i < 
hwinfo->phys_in_grp_count; i++) { snd_iprintf(buffer, "phys in grp[0x%d]: type 0x%d, count 0x%d\n", i, hwinfo->phys_out_grps[i].type, hwinfo->phys_out_grps[i].count); } snd_iprintf(buffer, "phys out grps: 0x%X\n", hwinfo->phys_out_grp_count); for (i = 0; i < hwinfo->phys_out_grp_count; i++) { snd_iprintf(buffer, "phys out grps[0x%d]: type 0x%d, count 0x%d\n", i, hwinfo->phys_out_grps[i].type, hwinfo->phys_out_grps[i].count); } snd_iprintf(buffer, "amdtp rx pcm channels 1x: 0x%X\n", hwinfo->amdtp_rx_pcm_channels); snd_iprintf(buffer, "amdtp tx pcm channels 1x: 0x%X\n", hwinfo->amdtp_tx_pcm_channels); snd_iprintf(buffer, "amdtp rx pcm channels 2x: 0x%X\n", hwinfo->amdtp_rx_pcm_channels_2x); snd_iprintf(buffer, "amdtp tx pcm channels 2x: 0x%X\n", hwinfo->amdtp_tx_pcm_channels_2x); snd_iprintf(buffer, "amdtp rx pcm channels 4x: 0x%X\n", hwinfo->amdtp_rx_pcm_channels_4x); snd_iprintf(buffer, "amdtp tx pcm channels 4x: 0x%X\n", hwinfo->amdtp_tx_pcm_channels_4x); snd_iprintf(buffer, "midi out ports: 0x%X\n", hwinfo->midi_out_ports); snd_iprintf(buffer, "midi in ports: 0x%X\n", hwinfo->midi_in_ports); snd_iprintf(buffer, "mixer playback channels: 0x%X\n", hwinfo->mixer_playback_channels); snd_iprintf(buffer, "mixer capture channels: 0x%X\n", hwinfo->mixer_capture_channels); end: kfree(hwinfo); } static void proc_read_clock(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_efw *efw = entry->private_data; enum snd_efw_clock_source clock_source; unsigned int sampling_rate; if (snd_efw_command_get_clock_source(efw, &clock_source) < 0) return; if (snd_efw_command_get_sampling_rate(efw, &sampling_rate) < 0) return; snd_iprintf(buffer, "Clock Source: %d\n", clock_source); snd_iprintf(buffer, "Sampling Rate: %d\n", sampling_rate); } /* * NOTE: * dB = 20 * log10(linear / 0x01000000) * -144.0 dB when linear is 0 */ static void proc_read_phys_meters(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_efw *efw = entry->private_data; 
struct snd_efw_phys_meters *meters; unsigned int g, c, m, max, size; const char *name; u32 *linear; int err; size = sizeof(struct snd_efw_phys_meters) + (efw->phys_in + efw->phys_out) * sizeof(u32); meters = kzalloc(size, GFP_KERNEL); if (meters == NULL) return; err = snd_efw_command_get_phys_meters(efw, meters, size); if (err < 0) goto end; snd_iprintf(buffer, "Physical Meters:\n"); m = 0; max = min(efw->phys_out, meters->out_meters); linear = meters->values; snd_iprintf(buffer, " %d Outputs:\n", max); for (g = 0; g < efw->phys_out_grp_count; g++) { name = get_phys_name(&efw->phys_out_grps[g], false); for (c = 0; c < efw->phys_out_grps[g].count; c++) { if (m < max) snd_iprintf(buffer, "\t%s [%d]: %d\n", name, c, linear[m++]); } } m = 0; max = min(efw->phys_in, meters->in_meters); linear = meters->values + meters->out_meters; snd_iprintf(buffer, " %d Inputs:\n", max); for (g = 0; g < efw->phys_in_grp_count; g++) { name = get_phys_name(&efw->phys_in_grps[g], true); for (c = 0; c < efw->phys_in_grps[g].count; c++) if (m < max) snd_iprintf(buffer, "\t%s [%d]: %d\n", name, c, linear[m++]); } end: kfree(meters); } static void proc_read_queues_state(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_efw *efw = entry->private_data; unsigned int consumed; if (efw->pull_ptr > efw->push_ptr) consumed = snd_efw_resp_buf_size - (unsigned int)(efw->pull_ptr - efw->push_ptr); else consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr); snd_iprintf(buffer, "%d %d/%d\n", efw->resp_queues, consumed, snd_efw_resp_buf_size); } static void add_node(struct snd_efw *efw, struct snd_info_entry *root, const char *name, void (*op)(struct snd_info_entry *e, struct snd_info_buffer *b)) { struct snd_info_entry *entry; entry = snd_info_create_card_entry(efw->card, name, root); if (entry == NULL) return; snd_info_set_text_ops(entry, efw, op); if (snd_info_register(entry) < 0) snd_info_free_entry(entry); } void snd_efw_proc_init(struct snd_efw *efw) { struct 
snd_info_entry *root; /* * All nodes are automatically removed at snd_card_disconnect(), * by following to link list. */ root = snd_info_create_card_entry(efw->card, "firewire", efw->card->proc_root); if (root == NULL) return; root->mode = S_IFDIR | S_IRUGO | S_IXUGO; if (snd_info_register(root) < 0) { snd_info_free_entry(root); return; } add_node(efw, root, "clock", proc_read_clock); add_node(efw, root, "firmware", proc_read_hwinfo); add_node(efw, root, "meters", proc_read_phys_meters); add_node(efw, root, "queues", proc_read_queues_state); }
gpl-2.0
CharlieMarshall/xbmc
xbmc/utils/Base64.cpp
57
3748
/* * Copyright (C) 2011-2013 Team XBMC * http://xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "Base64.h" #define PADDING '=' using namespace std; const std::string Base64::m_characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789+/"; void Base64::Encode(const char* input, unsigned int length, std::string &output) { if (input == NULL || length == 0) return; long l; output.clear(); output.reserve(((length + 2) / 3) * 4); for (unsigned int i = 0; i < length; i += 3) { l = ((((unsigned long) input[i]) << 16) & 0xFFFFFF) | ((((i + 1) < length) ? (((unsigned long) input[i + 1]) << 8) : 0) & 0xFFFF) | ((((i + 2) < length) ? 
(((unsigned long) input[i + 2]) << 0) : 0) & 0x00FF); output.push_back(m_characters[(l >> 18) & 0x3F]); output.push_back(m_characters[(l >> 12) & 0x3F]); if (i + 1 < length) output.push_back(m_characters[(l >> 6) & 0x3F]); if (i + 2 < length) output.push_back(m_characters[(l >> 0) & 0x3F]); } int left = 3 - (length % 3); if (length % 3) { for (int i = 0; i < left; i++) output.push_back(PADDING); } } std::string Base64::Encode(const char* input, unsigned int length) { std::string output; Encode(input, length, output); return output; } void Base64::Encode(const std::string &input, std::string &output) { Encode(input.c_str(), input.size(), output); } std::string Base64::Encode(const std::string &input) { std::string output; Encode(input, output); return output; } void Base64::Decode(const char* input, unsigned int length, std::string &output) { if (input == NULL || length == 0) return; long l; output.clear(); for (unsigned int index = 0; index < length; index++) { if (input[index] == '=') { length = index; break; } } output.reserve(length - ((length + 2) / 4)); for (unsigned int i = 0; i < length; i += 4) { l = ((((unsigned long) m_characters.find(input[i])) & 0x3F) << 18); l |= (((i + 1) < length) ? ((((unsigned long) m_characters.find(input[i + 1])) & 0x3F) << 12) : 0); l |= (((i + 2) < length) ? ((((unsigned long) m_characters.find(input[i + 2])) & 0x3F) << 6) : 0); l |= (((i + 3) < length) ? 
((((unsigned long) m_characters.find(input[i + 3])) & 0x3F) << 0) : 0); output.push_back((char)((l >> 16) & 0xFF)); if (i + 2 < length) output.push_back((char)((l >> 8) & 0xFF)); if (i + 3 < length) output.push_back((char)((l >> 0) & 0xFF)); } } std::string Base64::Decode(const char* input, unsigned int length) { std::string output; Decode(input, length, output); return output; } void Base64::Decode(const std::string &input, std::string &output) { size_t length = input.find_first_of(PADDING); if (length == string::npos) length = input.size(); Decode(input.c_str(), length, output); } std::string Base64::Decode(const std::string &input) { std::string output; Decode(input, output); return output; }
gpl-2.0
mike-dunn/linux-treo680
drivers/iommu/omap-iovmm.c
569
17670
/* * omap iommu: simple virtual address space management * * Copyright (C) 2008-2009 Nokia Corporation * * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/device.h> #include <linux/scatterlist.h> #include <linux/iommu.h> #include <linux/omap-iommu.h> #include <linux/platform_data/iommu-omap.h> #include <asm/cacheflush.h> #include <asm/mach/map.h> #include "omap-iopgtable.h" #include "omap-iommu.h" /* * IOVMF_FLAGS: attribute for iommu virtual memory area(iovma) * * lower 16 bit is used for h/w and upper 16 bit is for s/w. */ #define IOVMF_SW_SHIFT 16 /* * iovma: h/w flags derived from cam and ram attribute */ #define IOVMF_CAM_MASK (~((1 << 10) - 1)) #define IOVMF_RAM_MASK (~IOVMF_CAM_MASK) #define IOVMF_PGSZ_MASK (3 << 0) #define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M #define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K #define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K #define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M #define IOVMF_ENDIAN_MASK (1 << 9) #define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG #define IOVMF_ELSZ_MASK (3 << 7) #define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16 #define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32 #define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE #define IOVMF_MIXED_MASK (1 << 6) #define IOVMF_MIXED MMU_RAM_MIXED /* * iovma: s/w flags, used for mapping and umapping internally. 
*/ #define IOVMF_MMIO (1 << IOVMF_SW_SHIFT) #define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT) #define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT) /* "superpages" is supported just with physically linear pages */ #define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT)) #define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT)) #define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT)) #define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) static struct kmem_cache *iovm_area_cachep; /* return the offset of the first scatterlist entry in a sg table */ static unsigned int sgtable_offset(const struct sg_table *sgt) { if (!sgt || !sgt->nents) return 0; return sgt->sgl->offset; } /* return total bytes of sg buffers */ static size_t sgtable_len(const struct sg_table *sgt) { unsigned int i, total = 0; struct scatterlist *sg; if (!sgt) return 0; for_each_sg(sgt->sgl, sg, sgt->nents, i) { size_t bytes; bytes = sg->length + sg->offset; if (!iopgsz_ok(bytes)) { pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n", __func__, i, bytes, sg->offset); return 0; } if (i && sg->offset) { pr_err("%s: sg[%d] offset not allowed in internal entries\n", __func__, i); return 0; } total += bytes; } return total; } #define sgtable_ok(x) (!!sgtable_len(x)) static unsigned max_alignment(u32 addr) { int i; unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) ; return (i < ARRAY_SIZE(pagesize)) ? 
pagesize[i] : 0; } /* * calculate the optimal number sg elements from total bytes based on * iommu superpages */ static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) { unsigned nr_entries = 0, ent_sz; if (!IS_ALIGNED(bytes, PAGE_SIZE)) { pr_err("%s: wrong size %08x\n", __func__, bytes); return 0; } while (bytes) { ent_sz = max_alignment(da | pa); ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); nr_entries++; da += ent_sz; pa += ent_sz; bytes -= ent_sz; } return nr_entries; } /* allocate and initialize sg_table header(a kind of 'superblock') */ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, u32 da, u32 pa) { unsigned int nr_entries; int err; struct sg_table *sgt; if (!bytes) return ERR_PTR(-EINVAL); if (!IS_ALIGNED(bytes, PAGE_SIZE)) return ERR_PTR(-EINVAL); if (flags & IOVMF_LINEAR) { nr_entries = sgtable_nents(bytes, da, pa); if (!nr_entries) return ERR_PTR(-EINVAL); } else nr_entries = bytes / PAGE_SIZE; sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return ERR_PTR(-ENOMEM); err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); if (err) { kfree(sgt); return ERR_PTR(err); } pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); return sgt; } /* free sg_table header(a kind of superblock) */ static void sgtable_free(struct sg_table *sgt) { if (!sgt) return; sg_free_table(sgt); kfree(sgt); pr_debug("%s: sgt:%p\n", __func__, sgt); } /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ static void *vmap_sg(const struct sg_table *sgt) { u32 va; size_t total; unsigned int i; struct scatterlist *sg; struct vm_struct *new; const struct mem_type *mtype; mtype = get_mem_type(MT_DEVICE); if (!mtype) return ERR_PTR(-EINVAL); total = sgtable_len(sgt); if (!total) return ERR_PTR(-EINVAL); new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); if (!new) return ERR_PTR(-ENOMEM); va = (u32)new->addr; for_each_sg(sgt->sgl, sg, sgt->nents, i) { size_t bytes; u32 pa; int err; pa = sg_phys(sg) - sg->offset; bytes = 
sg->length + sg->offset; BUG_ON(bytes != PAGE_SIZE); err = ioremap_page(va, pa, mtype); if (err) goto err_out; va += bytes; } flush_cache_vmap((unsigned long)new->addr, (unsigned long)(new->addr + total)); return new->addr; err_out: WARN_ON(1); /* FIXME: cleanup some mpu mappings */ vunmap(new->addr); return ERR_PTR(-EAGAIN); } static inline void vunmap_sg(const void *va) { vunmap(va); } static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj, const u32 da) { struct iovm_struct *tmp; list_for_each_entry(tmp, &obj->mmap, list) { if ((da >= tmp->da_start) && (da < tmp->da_end)) { size_t len; len = tmp->da_end - tmp->da_start; dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, tmp->da_start, da, tmp->da_end, len, tmp->flags); return tmp; } } return NULL; } /** * omap_find_iovm_area - find iovma which includes @da * @dev: client device * @da: iommu device virtual address * * Find the existing iovma starting at @da */ struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); struct iovm_struct *area; mutex_lock(&obj->mmap_lock); area = __find_iovm_area(obj, da); mutex_unlock(&obj->mmap_lock); return area; } EXPORT_SYMBOL_GPL(omap_find_iovm_area); /* * This finds the hole(area) which fits the requested address and len * in iovmas mmap, and returns the new allocated iovma. */ static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da, size_t bytes, u32 flags) { struct iovm_struct *new, *tmp; u32 start, prev_end, alignment; if (!obj || !bytes) return ERR_PTR(-EINVAL); start = da; alignment = PAGE_SIZE; if (~flags & IOVMF_DA_FIXED) { /* Don't map address 0 */ start = obj->da_start ? 
obj->da_start : alignment; if (flags & IOVMF_LINEAR) alignment = iopgsz_max(bytes); start = roundup(start, alignment); } else if (start < obj->da_start || start > obj->da_end || obj->da_end - start < bytes) { return ERR_PTR(-EINVAL); } tmp = NULL; if (list_empty(&obj->mmap)) goto found; prev_end = 0; list_for_each_entry(tmp, &obj->mmap, list) { if (prev_end > start) break; if (tmp->da_start > start && (tmp->da_start - start) >= bytes) goto found; if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) start = roundup(tmp->da_end + 1, alignment); prev_end = tmp->da_end; } if ((start >= prev_end) && (obj->da_end - start >= bytes)) goto found; dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", __func__, da, bytes, flags); return ERR_PTR(-EINVAL); found: new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); new->iommu = obj; new->da_start = start; new->da_end = start + bytes; new->flags = flags; /* * keep ascending order of iovmas */ if (tmp) list_add_tail(&new->list, &tmp->list); else list_add(&new->list, &obj->mmap); dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", __func__, new->da_start, start, new->da_end, bytes, flags); return new; } static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area) { size_t bytes; BUG_ON(!obj || !area); bytes = area->da_end - area->da_start; dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", __func__, area->da_start, area->da_end, bytes, area->flags); list_del(&area->list); kmem_cache_free(iovm_area_cachep, area); } /** * omap_da_to_va - convert (d) to (v) * @dev: client device * @da: iommu device virtual address * @va: mpu virtual address * * Returns mpu virtual addr which corresponds to a given device virtual addr */ void *omap_da_to_va(struct device *dev, u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); void *va = NULL; struct iovm_struct *area; mutex_lock(&obj->mmap_lock); area = __find_iovm_area(obj, da); if (!area) { dev_dbg(obj->dev, "%s: no da 
area(%08x)\n", __func__, da); goto out; } va = area->va; out: mutex_unlock(&obj->mmap_lock); return va; } EXPORT_SYMBOL_GPL(omap_da_to_va); static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) { unsigned int i; struct scatterlist *sg; void *va = _va; void *va_end; for_each_sg(sgt->sgl, sg, sgt->nents, i) { struct page *pg; const size_t bytes = PAGE_SIZE; /* * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' */ pg = vmalloc_to_page(va); BUG_ON(!pg); sg_set_page(sg, pg, bytes, 0); va += bytes; } va_end = _va + PAGE_SIZE * i; } static inline void sgtable_drain_vmalloc(struct sg_table *sgt) { /* * Actually this is not necessary at all, just exists for * consistency of the code readability. */ BUG_ON(!sgt); } /* create 'da' <-> 'pa' mapping from 'sgt' */ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, const struct sg_table *sgt, u32 flags) { int err; unsigned int i, j; struct scatterlist *sg; u32 da = new->da_start; if (!domain || !sgt) return -EINVAL; BUG_ON(!sgtable_ok(sgt)); for_each_sg(sgt->sgl, sg, sgt->nents, i) { u32 pa; size_t bytes; pa = sg_phys(sg) - sg->offset; bytes = sg->length + sg->offset; flags &= ~IOVMF_PGSZ_MASK; if (bytes_to_iopgsz(bytes) < 0) goto err_out; pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, i, da, pa, bytes); err = iommu_map(domain, da, pa, bytes, flags); if (err) goto err_out; da += bytes; } return 0; err_out: da = new->da_start; for_each_sg(sgt->sgl, sg, i, j) { size_t bytes; bytes = sg->length + sg->offset; /* ignore failures.. 
we're already handling one */ iommu_unmap(domain, da, bytes); da += bytes; } return err; } /* release 'da' <-> 'pa' mapping */ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj, struct iovm_struct *area) { u32 start; size_t total = area->da_end - area->da_start; const struct sg_table *sgt = area->sgt; struct scatterlist *sg; int i; size_t unmapped; BUG_ON(!sgtable_ok(sgt)); BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); start = area->da_start; for_each_sg(sgt->sgl, sg, sgt->nents, i) { size_t bytes; bytes = sg->length + sg->offset; unmapped = iommu_unmap(domain, start, bytes); if (unmapped < bytes) break; dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", __func__, start, bytes, area->flags); BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); total -= bytes; start += bytes; } BUG_ON(total); } /* template function for all unmapping */ static struct sg_table *unmap_vm_area(struct iommu_domain *domain, struct omap_iommu *obj, const u32 da, void (*fn)(const void *), u32 flags) { struct sg_table *sgt = NULL; struct iovm_struct *area; if (!IS_ALIGNED(da, PAGE_SIZE)) { dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); return NULL; } mutex_lock(&obj->mmap_lock); area = __find_iovm_area(obj, da); if (!area) { dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); goto out; } if ((area->flags & flags) != flags) { dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, area->flags); goto out; } sgt = (struct sg_table *)area->sgt; unmap_iovm_area(domain, obj, area); fn(area->va); dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, area->da_start, da, area->da_end, area->da_end - area->da_start, area->flags); free_iovm_area(obj, area); out: mutex_unlock(&obj->mmap_lock); return sgt; } static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, const struct sg_table *sgt, void *va, size_t bytes, u32 flags) { int err = -ENOMEM; struct iovm_struct *new; mutex_lock(&obj->mmap_lock); new = alloc_iovm_area(obj, 
da, bytes, flags); if (IS_ERR(new)) { err = PTR_ERR(new); goto err_alloc_iovma; } new->va = va; new->sgt = sgt; if (map_iovm_area(domain, new, sgt, new->flags)) goto err_map; mutex_unlock(&obj->mmap_lock); dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", __func__, new->da_start, bytes, new->flags, va); return new->da_start; err_map: free_iovm_area(obj, new); err_alloc_iovma: mutex_unlock(&obj->mmap_lock); return err; } static inline u32 __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, const struct sg_table *sgt, void *va, size_t bytes, u32 flags) { return map_iommu_region(domain, obj, da, sgt, va, bytes, flags); } /** * omap_iommu_vmap - (d)-(p)-(v) address mapper * @domain: iommu domain * @dev: client device * @sgt: address of scatter gather table * @flags: iovma and page property * * Creates 1-n-1 mapping with given @sgt and returns @da. * All @sgt element must be io page size aligned. */ u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da, const struct sg_table *sgt, u32 flags) { struct omap_iommu *obj = dev_to_omap_iommu(dev); size_t bytes; void *va = NULL; if (!obj || !obj->dev || !sgt) return -EINVAL; bytes = sgtable_len(sgt); if (!bytes) return -EINVAL; bytes = PAGE_ALIGN(bytes); if (flags & IOVMF_MMIO) { va = vmap_sg(sgt); if (IS_ERR(va)) return PTR_ERR(va); } flags |= IOVMF_DISCONT; flags |= IOVMF_MMIO; da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); if (IS_ERR_VALUE(da)) vunmap_sg(va); return da + sgtable_offset(sgt); } EXPORT_SYMBOL_GPL(omap_iommu_vmap); /** * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()' * @domain: iommu domain * @dev: client device * @da: iommu device virtual address * * Free the iommu virtually contiguous memory area starting at * @da, which was returned by 'omap_iommu_vmap()'. 
*/ struct sg_table * omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); struct sg_table *sgt; /* * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called. * Just returns 'sgt' to the caller to free */ da &= PAGE_MASK; sgt = unmap_vm_area(domain, obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); if (!sgt) dev_dbg(obj->dev, "%s: No sgt\n", __func__); return sgt; } EXPORT_SYMBOL_GPL(omap_iommu_vunmap); /** * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper * @dev: client device * @da: contiguous iommu virtual memory * @bytes: allocation size * @flags: iovma and page property * * Allocate @bytes linearly and creates 1-n-1 mapping and returns * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. */ u32 omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da, size_t bytes, u32 flags) { struct omap_iommu *obj = dev_to_omap_iommu(dev); void *va; struct sg_table *sgt; if (!obj || !obj->dev || !bytes) return -EINVAL; bytes = PAGE_ALIGN(bytes); va = vmalloc(bytes); if (!va) return -ENOMEM; flags |= IOVMF_DISCONT; flags |= IOVMF_ALLOC; sgt = sgtable_alloc(bytes, flags, da, 0); if (IS_ERR(sgt)) { da = PTR_ERR(sgt); goto err_sgt_alloc; } sgtable_fill_vmalloc(sgt, va); da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); if (IS_ERR_VALUE(da)) goto err_iommu_vmap; return da; err_iommu_vmap: sgtable_drain_vmalloc(sgt); sgtable_free(sgt); err_sgt_alloc: vfree(va); return da; } EXPORT_SYMBOL_GPL(omap_iommu_vmalloc); /** * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()' * @dev: client device * @da: iommu device virtual address * * Frees the iommu virtually continuous memory area starting at * @da, as obtained from 'omap_iommu_vmalloc()'. 
*/ void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev, const u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); struct sg_table *sgt; sgt = unmap_vm_area(domain, obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); if (!sgt) dev_dbg(obj->dev, "%s: No sgt\n", __func__); sgtable_free(sgt); } EXPORT_SYMBOL_GPL(omap_iommu_vfree); static int __init iovmm_init(void) { const unsigned long flags = SLAB_HWCACHE_ALIGN; struct kmem_cache *p; p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, flags, NULL); if (!p) return -ENOMEM; iovm_area_cachep = p; return 0; } module_init(iovmm_init); static void __exit iovmm_exit(void) { kmem_cache_destroy(iovm_area_cachep); } module_exit(iovmm_exit); MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
lexi6725/linux-3.14.26
drivers/iommu/omap-iovmm.c
569
17670
/* * omap iommu: simple virtual address space management * * Copyright (C) 2008-2009 Nokia Corporation * * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/device.h> #include <linux/scatterlist.h> #include <linux/iommu.h> #include <linux/omap-iommu.h> #include <linux/platform_data/iommu-omap.h> #include <asm/cacheflush.h> #include <asm/mach/map.h> #include "omap-iopgtable.h" #include "omap-iommu.h" /* * IOVMF_FLAGS: attribute for iommu virtual memory area(iovma) * * lower 16 bit is used for h/w and upper 16 bit is for s/w. */ #define IOVMF_SW_SHIFT 16 /* * iovma: h/w flags derived from cam and ram attribute */ #define IOVMF_CAM_MASK (~((1 << 10) - 1)) #define IOVMF_RAM_MASK (~IOVMF_CAM_MASK) #define IOVMF_PGSZ_MASK (3 << 0) #define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M #define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K #define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K #define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M #define IOVMF_ENDIAN_MASK (1 << 9) #define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG #define IOVMF_ELSZ_MASK (3 << 7) #define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16 #define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32 #define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE #define IOVMF_MIXED_MASK (1 << 6) #define IOVMF_MIXED MMU_RAM_MIXED /* * iovma: s/w flags, used for mapping and umapping internally. 
*/ #define IOVMF_MMIO (1 << IOVMF_SW_SHIFT) #define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT) #define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT) /* "superpages" is supported just with physically linear pages */ #define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT)) #define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT)) #define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT)) #define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) static struct kmem_cache *iovm_area_cachep; /* return the offset of the first scatterlist entry in a sg table */ static unsigned int sgtable_offset(const struct sg_table *sgt) { if (!sgt || !sgt->nents) return 0; return sgt->sgl->offset; } /* return total bytes of sg buffers */ static size_t sgtable_len(const struct sg_table *sgt) { unsigned int i, total = 0; struct scatterlist *sg; if (!sgt) return 0; for_each_sg(sgt->sgl, sg, sgt->nents, i) { size_t bytes; bytes = sg->length + sg->offset; if (!iopgsz_ok(bytes)) { pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n", __func__, i, bytes, sg->offset); return 0; } if (i && sg->offset) { pr_err("%s: sg[%d] offset not allowed in internal entries\n", __func__, i); return 0; } total += bytes; } return total; } #define sgtable_ok(x) (!!sgtable_len(x)) static unsigned max_alignment(u32 addr) { int i; unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) ; return (i < ARRAY_SIZE(pagesize)) ? 
pagesize[i] : 0; } /* * calculate the optimal number sg elements from total bytes based on * iommu superpages */ static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) { unsigned nr_entries = 0, ent_sz; if (!IS_ALIGNED(bytes, PAGE_SIZE)) { pr_err("%s: wrong size %08x\n", __func__, bytes); return 0; } while (bytes) { ent_sz = max_alignment(da | pa); ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); nr_entries++; da += ent_sz; pa += ent_sz; bytes -= ent_sz; } return nr_entries; } /* allocate and initialize sg_table header(a kind of 'superblock') */ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, u32 da, u32 pa) { unsigned int nr_entries; int err; struct sg_table *sgt; if (!bytes) return ERR_PTR(-EINVAL); if (!IS_ALIGNED(bytes, PAGE_SIZE)) return ERR_PTR(-EINVAL); if (flags & IOVMF_LINEAR) { nr_entries = sgtable_nents(bytes, da, pa); if (!nr_entries) return ERR_PTR(-EINVAL); } else nr_entries = bytes / PAGE_SIZE; sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return ERR_PTR(-ENOMEM); err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); if (err) { kfree(sgt); return ERR_PTR(err); } pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); return sgt; } /* free sg_table header(a kind of superblock) */ static void sgtable_free(struct sg_table *sgt) { if (!sgt) return; sg_free_table(sgt); kfree(sgt); pr_debug("%s: sgt:%p\n", __func__, sgt); } /* map 'sglist' to a contiguous mpu virtual area and return 'va' */ static void *vmap_sg(const struct sg_table *sgt) { u32 va; size_t total; unsigned int i; struct scatterlist *sg; struct vm_struct *new; const struct mem_type *mtype; mtype = get_mem_type(MT_DEVICE); if (!mtype) return ERR_PTR(-EINVAL); total = sgtable_len(sgt); if (!total) return ERR_PTR(-EINVAL); new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); if (!new) return ERR_PTR(-ENOMEM); va = (u32)new->addr; for_each_sg(sgt->sgl, sg, sgt->nents, i) { size_t bytes; u32 pa; int err; pa = sg_phys(sg) - sg->offset; bytes = 
sg->length + sg->offset; BUG_ON(bytes != PAGE_SIZE); err = ioremap_page(va, pa, mtype); if (err) goto err_out; va += bytes; } flush_cache_vmap((unsigned long)new->addr, (unsigned long)(new->addr + total)); return new->addr; err_out: WARN_ON(1); /* FIXME: cleanup some mpu mappings */ vunmap(new->addr); return ERR_PTR(-EAGAIN); } static inline void vunmap_sg(const void *va) { vunmap(va); } static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj, const u32 da) { struct iovm_struct *tmp; list_for_each_entry(tmp, &obj->mmap, list) { if ((da >= tmp->da_start) && (da < tmp->da_end)) { size_t len; len = tmp->da_end - tmp->da_start; dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, tmp->da_start, da, tmp->da_end, len, tmp->flags); return tmp; } } return NULL; } /** * omap_find_iovm_area - find iovma which includes @da * @dev: client device * @da: iommu device virtual address * * Find the existing iovma starting at @da */ struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); struct iovm_struct *area; mutex_lock(&obj->mmap_lock); area = __find_iovm_area(obj, da); mutex_unlock(&obj->mmap_lock); return area; } EXPORT_SYMBOL_GPL(omap_find_iovm_area); /* * This finds the hole(area) which fits the requested address and len * in iovmas mmap, and returns the new allocated iovma. */ static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da, size_t bytes, u32 flags) { struct iovm_struct *new, *tmp; u32 start, prev_end, alignment; if (!obj || !bytes) return ERR_PTR(-EINVAL); start = da; alignment = PAGE_SIZE; if (~flags & IOVMF_DA_FIXED) { /* Don't map address 0 */ start = obj->da_start ? 
obj->da_start : alignment; if (flags & IOVMF_LINEAR) alignment = iopgsz_max(bytes); start = roundup(start, alignment); } else if (start < obj->da_start || start > obj->da_end || obj->da_end - start < bytes) { return ERR_PTR(-EINVAL); } tmp = NULL; if (list_empty(&obj->mmap)) goto found; prev_end = 0; list_for_each_entry(tmp, &obj->mmap, list) { if (prev_end > start) break; if (tmp->da_start > start && (tmp->da_start - start) >= bytes) goto found; if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) start = roundup(tmp->da_end + 1, alignment); prev_end = tmp->da_end; } if ((start >= prev_end) && (obj->da_end - start >= bytes)) goto found; dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", __func__, da, bytes, flags); return ERR_PTR(-EINVAL); found: new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); new->iommu = obj; new->da_start = start; new->da_end = start + bytes; new->flags = flags; /* * keep ascending order of iovmas */ if (tmp) list_add_tail(&new->list, &tmp->list); else list_add(&new->list, &obj->mmap); dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", __func__, new->da_start, start, new->da_end, bytes, flags); return new; } static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area) { size_t bytes; BUG_ON(!obj || !area); bytes = area->da_end - area->da_start; dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", __func__, area->da_start, area->da_end, bytes, area->flags); list_del(&area->list); kmem_cache_free(iovm_area_cachep, area); } /** * omap_da_to_va - convert (d) to (v) * @dev: client device * @da: iommu device virtual address * @va: mpu virtual address * * Returns mpu virtual addr which corresponds to a given device virtual addr */ void *omap_da_to_va(struct device *dev, u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); void *va = NULL; struct iovm_struct *area; mutex_lock(&obj->mmap_lock); area = __find_iovm_area(obj, da); if (!area) { dev_dbg(obj->dev, "%s: no da 
area(%08x)\n", __func__, da); goto out; } va = area->va; out: mutex_unlock(&obj->mmap_lock); return va; } EXPORT_SYMBOL_GPL(omap_da_to_va); static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) { unsigned int i; struct scatterlist *sg; void *va = _va; void *va_end; for_each_sg(sgt->sgl, sg, sgt->nents, i) { struct page *pg; const size_t bytes = PAGE_SIZE; /* * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()' */ pg = vmalloc_to_page(va); BUG_ON(!pg); sg_set_page(sg, pg, bytes, 0); va += bytes; } va_end = _va + PAGE_SIZE * i; } static inline void sgtable_drain_vmalloc(struct sg_table *sgt) { /* * Actually this is not necessary at all, just exists for * consistency of the code readability. */ BUG_ON(!sgt); } /* create 'da' <-> 'pa' mapping from 'sgt' */ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, const struct sg_table *sgt, u32 flags) { int err; unsigned int i, j; struct scatterlist *sg; u32 da = new->da_start; if (!domain || !sgt) return -EINVAL; BUG_ON(!sgtable_ok(sgt)); for_each_sg(sgt->sgl, sg, sgt->nents, i) { u32 pa; size_t bytes; pa = sg_phys(sg) - sg->offset; bytes = sg->length + sg->offset; flags &= ~IOVMF_PGSZ_MASK; if (bytes_to_iopgsz(bytes) < 0) goto err_out; pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, i, da, pa, bytes); err = iommu_map(domain, da, pa, bytes, flags); if (err) goto err_out; da += bytes; } return 0; err_out: da = new->da_start; for_each_sg(sgt->sgl, sg, i, j) { size_t bytes; bytes = sg->length + sg->offset; /* ignore failures.. 
we're already handling one */ iommu_unmap(domain, da, bytes); da += bytes; } return err; } /* release 'da' <-> 'pa' mapping */ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj, struct iovm_struct *area) { u32 start; size_t total = area->da_end - area->da_start; const struct sg_table *sgt = area->sgt; struct scatterlist *sg; int i; size_t unmapped; BUG_ON(!sgtable_ok(sgt)); BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); start = area->da_start; for_each_sg(sgt->sgl, sg, sgt->nents, i) { size_t bytes; bytes = sg->length + sg->offset; unmapped = iommu_unmap(domain, start, bytes); if (unmapped < bytes) break; dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", __func__, start, bytes, area->flags); BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); total -= bytes; start += bytes; } BUG_ON(total); } /* template function for all unmapping */ static struct sg_table *unmap_vm_area(struct iommu_domain *domain, struct omap_iommu *obj, const u32 da, void (*fn)(const void *), u32 flags) { struct sg_table *sgt = NULL; struct iovm_struct *area; if (!IS_ALIGNED(da, PAGE_SIZE)) { dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); return NULL; } mutex_lock(&obj->mmap_lock); area = __find_iovm_area(obj, da); if (!area) { dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); goto out; } if ((area->flags & flags) != flags) { dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, area->flags); goto out; } sgt = (struct sg_table *)area->sgt; unmap_iovm_area(domain, obj, area); fn(area->va); dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, area->da_start, da, area->da_end, area->da_end - area->da_start, area->flags); free_iovm_area(obj, area); out: mutex_unlock(&obj->mmap_lock); return sgt; } static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, const struct sg_table *sgt, void *va, size_t bytes, u32 flags) { int err = -ENOMEM; struct iovm_struct *new; mutex_lock(&obj->mmap_lock); new = alloc_iovm_area(obj, 
da, bytes, flags); if (IS_ERR(new)) { err = PTR_ERR(new); goto err_alloc_iovma; } new->va = va; new->sgt = sgt; if (map_iovm_area(domain, new, sgt, new->flags)) goto err_map; mutex_unlock(&obj->mmap_lock); dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", __func__, new->da_start, bytes, new->flags, va); return new->da_start; err_map: free_iovm_area(obj, new); err_alloc_iovma: mutex_unlock(&obj->mmap_lock); return err; } static inline u32 __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, const struct sg_table *sgt, void *va, size_t bytes, u32 flags) { return map_iommu_region(domain, obj, da, sgt, va, bytes, flags); } /** * omap_iommu_vmap - (d)-(p)-(v) address mapper * @domain: iommu domain * @dev: client device * @sgt: address of scatter gather table * @flags: iovma and page property * * Creates 1-n-1 mapping with given @sgt and returns @da. * All @sgt element must be io page size aligned. */ u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da, const struct sg_table *sgt, u32 flags) { struct omap_iommu *obj = dev_to_omap_iommu(dev); size_t bytes; void *va = NULL; if (!obj || !obj->dev || !sgt) return -EINVAL; bytes = sgtable_len(sgt); if (!bytes) return -EINVAL; bytes = PAGE_ALIGN(bytes); if (flags & IOVMF_MMIO) { va = vmap_sg(sgt); if (IS_ERR(va)) return PTR_ERR(va); } flags |= IOVMF_DISCONT; flags |= IOVMF_MMIO; da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); if (IS_ERR_VALUE(da)) vunmap_sg(va); return da + sgtable_offset(sgt); } EXPORT_SYMBOL_GPL(omap_iommu_vmap); /** * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()' * @domain: iommu domain * @dev: client device * @da: iommu device virtual address * * Free the iommu virtually contiguous memory area starting at * @da, which was returned by 'omap_iommu_vmap()'. 
*/ struct sg_table * omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); struct sg_table *sgt; /* * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called. * Just returns 'sgt' to the caller to free */ da &= PAGE_MASK; sgt = unmap_vm_area(domain, obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); if (!sgt) dev_dbg(obj->dev, "%s: No sgt\n", __func__); return sgt; } EXPORT_SYMBOL_GPL(omap_iommu_vunmap); /** * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper * @dev: client device * @da: contiguous iommu virtual memory * @bytes: allocation size * @flags: iovma and page property * * Allocate @bytes linearly and creates 1-n-1 mapping and returns * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. */ u32 omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da, size_t bytes, u32 flags) { struct omap_iommu *obj = dev_to_omap_iommu(dev); void *va; struct sg_table *sgt; if (!obj || !obj->dev || !bytes) return -EINVAL; bytes = PAGE_ALIGN(bytes); va = vmalloc(bytes); if (!va) return -ENOMEM; flags |= IOVMF_DISCONT; flags |= IOVMF_ALLOC; sgt = sgtable_alloc(bytes, flags, da, 0); if (IS_ERR(sgt)) { da = PTR_ERR(sgt); goto err_sgt_alloc; } sgtable_fill_vmalloc(sgt, va); da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); if (IS_ERR_VALUE(da)) goto err_iommu_vmap; return da; err_iommu_vmap: sgtable_drain_vmalloc(sgt); sgtable_free(sgt); err_sgt_alloc: vfree(va); return da; } EXPORT_SYMBOL_GPL(omap_iommu_vmalloc); /** * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()' * @dev: client device * @da: iommu device virtual address * * Frees the iommu virtually continuous memory area starting at * @da, as obtained from 'omap_iommu_vmalloc()'. 
*/ void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev, const u32 da) { struct omap_iommu *obj = dev_to_omap_iommu(dev); struct sg_table *sgt; sgt = unmap_vm_area(domain, obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); if (!sgt) dev_dbg(obj->dev, "%s: No sgt\n", __func__); sgtable_free(sgt); } EXPORT_SYMBOL_GPL(omap_iommu_vfree); static int __init iovmm_init(void) { const unsigned long flags = SLAB_HWCACHE_ALIGN; struct kmem_cache *p; p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, flags, NULL); if (!p) return -ENOMEM; iovm_area_cachep = p; return 0; } module_init(iovmm_init); static void __exit iovmm_exit(void) { kmem_cache_destroy(iovm_area_cachep); } module_exit(iovmm_exit); MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
programmecat/linux
drivers/usb/phy/phy-tegra-usb.c
825
28615
/* * Copyright (C) 2010 Google, Inc. * Copyright (C) 2013 NVIDIA Corporation * * Author: * Erik Gilling <konkers@google.com> * Benoit Goby <benoit@android.com> * Venu Byravarasu <vbyravarasu@nvidia.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/resource.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> #include <linux/usb/of.h> #include <linux/usb/ehci_def.h> #include <linux/usb/tegra_usb_phy.h> #include <linux/regulator/consumer.h> #define ULPI_VIEWPORT 0x170 /* PORTSC PTS/PHCD bits, Tegra20 only */ #define TEGRA_USB_PORTSC1 0x184 #define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30) #define TEGRA_USB_PORTSC1_PHCD (1 << 23) /* HOSTPC1 PTS/PHCD bits, Tegra30 and above */ #define TEGRA_USB_HOSTPC1_DEVLC 0x1b4 #define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29) #define TEGRA_USB_HOSTPC1_DEVLC_PHCD (1 << 22) /* Bits of PORTSC1, which will get cleared by writing 1 into them */ #define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) #define USB_SUSP_CTRL 0x400 #define USB_WAKE_ON_CNNT_EN_DEV (1 << 3) #define USB_WAKE_ON_DISCON_EN_DEV (1 << 4) #define USB_SUSP_CLR (1 << 5) #define USB_PHY_CLK_VALID (1 << 7) #define UTMIP_RESET (1 << 11) #define UHSIC_RESET (1 << 11) #define UTMIP_PHY_ENABLE (1 << 12) #define ULPI_PHY_ENABLE (1 << 13) #define 
USB_SUSP_SET (1 << 14) #define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16) #define USB1_LEGACY_CTRL 0x410 #define USB1_NO_LEGACY_MODE (1 << 0) #define USB1_VBUS_SENSE_CTL_MASK (3 << 1) #define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1) #define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \ (1 << 1) #define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1) #define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1) #define ULPI_TIMING_CTRL_0 0x424 #define ULPI_OUTPUT_PINMUX_BYP (1 << 10) #define ULPI_CLKOUT_PINMUX_BYP (1 << 11) #define ULPI_TIMING_CTRL_1 0x428 #define ULPI_DATA_TRIMMER_LOAD (1 << 0) #define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1) #define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16) #define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17) #define ULPI_DIR_TRIMMER_LOAD (1 << 24) #define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25) #define UTMIP_PLL_CFG1 0x804 #define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0) #define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27) #define UTMIP_XCVR_CFG0 0x808 #define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0) #define UTMIP_XCVR_SETUP_MSB(x) ((((x) & 0x70) >> 4) << 22) #define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8) #define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10) #define UTMIP_FORCE_PD_POWERDOWN (1 << 14) #define UTMIP_FORCE_PD2_POWERDOWN (1 << 16) #define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18) #define UTMIP_XCVR_LSBIAS_SEL (1 << 21) #define UTMIP_XCVR_HSSLEW(x) (((x) & 0x3) << 4) #define UTMIP_XCVR_HSSLEW_MSB(x) ((((x) & 0x1fc) >> 2) << 25) #define UTMIP_BIAS_CFG0 0x80c #define UTMIP_OTGPD (1 << 11) #define UTMIP_BIASPD (1 << 10) #define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0) #define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2) #define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24) #define UTMIP_HSRX_CFG0 0x810 #define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10) #define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15) #define UTMIP_HSRX_CFG1 0x814 #define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1) #define UTMIP_TX_CFG0 0x820 
#define UTMIP_FS_PREABMLE_J (1 << 19) #define UTMIP_HS_DISCON_DISABLE (1 << 8) #define UTMIP_MISC_CFG0 0x824 #define UTMIP_DPDM_OBSERVE (1 << 26) #define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27) #define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf) #define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe) #define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd) #define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc) #define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22) #define UTMIP_MISC_CFG1 0x828 #define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18) #define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6) #define UTMIP_DEBOUNCE_CFG0 0x82c #define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0) #define UTMIP_BAT_CHRG_CFG0 0x830 #define UTMIP_PD_CHRG (1 << 0) #define UTMIP_SPARE_CFG0 0x834 #define FUSE_SETUP_SEL (1 << 3) #define UTMIP_XCVR_CFG1 0x838 #define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0) #define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2) #define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4) #define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18) #define UTMIP_BIAS_CFG1 0x83c #define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3) /* For Tegra30 and above only, the address is different in Tegra20 */ #define USB_USBMODE 0x1f8 #define USB_USBMODE_MASK (3 << 0) #define USB_USBMODE_HOST (3 << 0) #define USB_USBMODE_DEVICE (2 << 0) static DEFINE_SPINLOCK(utmip_pad_lock); static int utmip_pad_count; struct tegra_xtal_freq { int freq; u8 enable_delay; u8 stable_count; u8 active_delay; u8 xtal_freq_count; u16 debounce; }; static const struct tegra_xtal_freq tegra_freq_table[] = { { .freq = 12000000, .enable_delay = 0x02, .stable_count = 0x2F, .active_delay = 0x04, .xtal_freq_count = 0x76, .debounce = 0x7530, }, { .freq = 13000000, .enable_delay = 0x02, .stable_count = 0x33, .active_delay = 0x05, .xtal_freq_count = 0x7F, .debounce = 0x7EF4, }, { .freq = 19200000, .enable_delay = 0x03, .stable_count = 0x4B, .active_delay = 0x06, .xtal_freq_count = 
0xBB, .debounce = 0xBB80, }, { .freq = 26000000, .enable_delay = 0x04, .stable_count = 0x66, .active_delay = 0x09, .xtal_freq_count = 0xFE, .debounce = 0xFDE8, }, }; static void set_pts(struct tegra_usb_phy *phy, u8 pts_val) { void __iomem *base = phy->regs; unsigned long val; if (phy->soc_config->has_hostpc) { val = readl(base + TEGRA_USB_HOSTPC1_DEVLC); val &= ~TEGRA_USB_HOSTPC1_DEVLC_PTS(~0); val |= TEGRA_USB_HOSTPC1_DEVLC_PTS(pts_val); writel(val, base + TEGRA_USB_HOSTPC1_DEVLC); } else { val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS; val &= ~TEGRA_USB_PORTSC1_PTS(~0); val |= TEGRA_USB_PORTSC1_PTS(pts_val); writel(val, base + TEGRA_USB_PORTSC1); } } static void set_phcd(struct tegra_usb_phy *phy, bool enable) { void __iomem *base = phy->regs; unsigned long val; if (phy->soc_config->has_hostpc) { val = readl(base + TEGRA_USB_HOSTPC1_DEVLC); if (enable) val |= TEGRA_USB_HOSTPC1_DEVLC_PHCD; else val &= ~TEGRA_USB_HOSTPC1_DEVLC_PHCD; writel(val, base + TEGRA_USB_HOSTPC1_DEVLC); } else { val = readl(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS; if (enable) val |= TEGRA_USB_PORTSC1_PHCD; else val &= ~TEGRA_USB_PORTSC1_PHCD; writel(val, base + TEGRA_USB_PORTSC1); } } static int utmip_pad_open(struct tegra_usb_phy *phy) { phy->pad_clk = devm_clk_get(phy->u_phy.dev, "utmi-pads"); if (IS_ERR(phy->pad_clk)) { pr_err("%s: can't get utmip pad clock\n", __func__); return PTR_ERR(phy->pad_clk); } return 0; } static void utmip_pad_power_on(struct tegra_usb_phy *phy) { unsigned long val, flags; void __iomem *base = phy->pad_regs; struct tegra_utmip_config *config = phy->config; clk_prepare_enable(phy->pad_clk); spin_lock_irqsave(&utmip_pad_lock, flags); if (utmip_pad_count++ == 0) { val = readl(base + UTMIP_BIAS_CFG0); val &= ~(UTMIP_OTGPD | UTMIP_BIASPD); if (phy->soc_config->requires_extra_tuning_parameters) { val &= ~(UTMIP_HSSQUELCH_LEVEL(~0) | UTMIP_HSDISCON_LEVEL(~0) | UTMIP_HSDISCON_LEVEL_MSB(~0)); val |= UTMIP_HSSQUELCH_LEVEL(config->hssquelch_level); 
val |= UTMIP_HSDISCON_LEVEL(config->hsdiscon_level); val |= UTMIP_HSDISCON_LEVEL_MSB(config->hsdiscon_level); } writel(val, base + UTMIP_BIAS_CFG0); } spin_unlock_irqrestore(&utmip_pad_lock, flags); clk_disable_unprepare(phy->pad_clk); } static int utmip_pad_power_off(struct tegra_usb_phy *phy) { unsigned long val, flags; void __iomem *base = phy->pad_regs; if (!utmip_pad_count) { pr_err("%s: utmip pad already powered off\n", __func__); return -EINVAL; } clk_prepare_enable(phy->pad_clk); spin_lock_irqsave(&utmip_pad_lock, flags); if (--utmip_pad_count == 0) { val = readl(base + UTMIP_BIAS_CFG0); val |= UTMIP_OTGPD | UTMIP_BIASPD; writel(val, base + UTMIP_BIAS_CFG0); } spin_unlock_irqrestore(&utmip_pad_lock, flags); clk_disable_unprepare(phy->pad_clk); return 0; } static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result) { unsigned long timeout = 2000; do { if ((readl(reg) & mask) == result) return 0; udelay(1); timeout--; } while (timeout); return -1; } static void utmi_phy_clk_disable(struct tegra_usb_phy *phy) { unsigned long val; void __iomem *base = phy->regs; if (phy->is_legacy_phy) { val = readl(base + USB_SUSP_CTRL); val |= USB_SUSP_SET; writel(val, base + USB_SUSP_CTRL); udelay(10); val = readl(base + USB_SUSP_CTRL); val &= ~USB_SUSP_SET; writel(val, base + USB_SUSP_CTRL); } else set_phcd(phy, true); if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0) pr_err("%s: timeout waiting for phy to stabilize\n", __func__); } static void utmi_phy_clk_enable(struct tegra_usb_phy *phy) { unsigned long val; void __iomem *base = phy->regs; if (phy->is_legacy_phy) { val = readl(base + USB_SUSP_CTRL); val |= USB_SUSP_CLR; writel(val, base + USB_SUSP_CTRL); udelay(10); val = readl(base + USB_SUSP_CTRL); val &= ~USB_SUSP_CLR; writel(val, base + USB_SUSP_CTRL); } else set_phcd(phy, false); if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, USB_PHY_CLK_VALID)) pr_err("%s: timeout waiting for phy to stabilize\n", __func__); } static 
int utmi_phy_power_on(struct tegra_usb_phy *phy) { unsigned long val; void __iomem *base = phy->regs; struct tegra_utmip_config *config = phy->config; val = readl(base + USB_SUSP_CTRL); val |= UTMIP_RESET; writel(val, base + USB_SUSP_CTRL); if (phy->is_legacy_phy) { val = readl(base + USB1_LEGACY_CTRL); val |= USB1_NO_LEGACY_MODE; writel(val, base + USB1_LEGACY_CTRL); } val = readl(base + UTMIP_TX_CFG0); val |= UTMIP_FS_PREABMLE_J; writel(val, base + UTMIP_TX_CFG0); val = readl(base + UTMIP_HSRX_CFG0); val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0)); val |= UTMIP_IDLE_WAIT(config->idle_wait_delay); val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit); writel(val, base + UTMIP_HSRX_CFG0); val = readl(base + UTMIP_HSRX_CFG1); val &= ~UTMIP_HS_SYNC_START_DLY(~0); val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay); writel(val, base + UTMIP_HSRX_CFG1); val = readl(base + UTMIP_DEBOUNCE_CFG0); val &= ~UTMIP_BIAS_DEBOUNCE_A(~0); val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce); writel(val, base + UTMIP_DEBOUNCE_CFG0); val = readl(base + UTMIP_MISC_CFG0); val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE; writel(val, base + UTMIP_MISC_CFG0); if (!phy->soc_config->utmi_pll_config_in_car_module) { val = readl(base + UTMIP_MISC_CFG1); val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) | UTMIP_PLLU_STABLE_COUNT(~0)); val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) | UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count); writel(val, base + UTMIP_MISC_CFG1); val = readl(base + UTMIP_PLL_CFG1); val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) | UTMIP_PLLU_ENABLE_DLY_COUNT(~0)); val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) | UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay); writel(val, base + UTMIP_PLL_CFG1); } if (phy->mode == USB_DR_MODE_PERIPHERAL) { val = readl(base + USB_SUSP_CTRL); val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV); writel(val, base + USB_SUSP_CTRL); val = readl(base + UTMIP_BAT_CHRG_CFG0); val &= ~UTMIP_PD_CHRG; writel(val, base + 
UTMIP_BAT_CHRG_CFG0); } else { val = readl(base + UTMIP_BAT_CHRG_CFG0); val |= UTMIP_PD_CHRG; writel(val, base + UTMIP_BAT_CHRG_CFG0); } utmip_pad_power_on(phy); val = readl(base + UTMIP_XCVR_CFG0); val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_LSBIAS_SEL | UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_SETUP_MSB(~0) | UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0)); if (!config->xcvr_setup_use_fuses) { val |= UTMIP_XCVR_SETUP(config->xcvr_setup); val |= UTMIP_XCVR_SETUP_MSB(config->xcvr_setup); } val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew); val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew); if (phy->soc_config->requires_extra_tuning_parameters) { val &= ~(UTMIP_XCVR_HSSLEW(~0) | UTMIP_XCVR_HSSLEW_MSB(~0)); val |= UTMIP_XCVR_HSSLEW(config->xcvr_hsslew); val |= UTMIP_XCVR_HSSLEW_MSB(config->xcvr_hsslew); } writel(val, base + UTMIP_XCVR_CFG0); val = readl(base + UTMIP_XCVR_CFG1); val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN | UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0)); val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj); writel(val, base + UTMIP_XCVR_CFG1); val = readl(base + UTMIP_BIAS_CFG1); val &= ~UTMIP_BIAS_PDTRK_COUNT(~0); val |= UTMIP_BIAS_PDTRK_COUNT(0x5); writel(val, base + UTMIP_BIAS_CFG1); val = readl(base + UTMIP_SPARE_CFG0); if (config->xcvr_setup_use_fuses) val |= FUSE_SETUP_SEL; else val &= ~FUSE_SETUP_SEL; writel(val, base + UTMIP_SPARE_CFG0); if (!phy->is_legacy_phy) { val = readl(base + USB_SUSP_CTRL); val |= UTMIP_PHY_ENABLE; writel(val, base + USB_SUSP_CTRL); } val = readl(base + USB_SUSP_CTRL); val &= ~UTMIP_RESET; writel(val, base + USB_SUSP_CTRL); if (phy->is_legacy_phy) { val = readl(base + USB1_LEGACY_CTRL); val &= ~USB1_VBUS_SENSE_CTL_MASK; val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD; writel(val, base + USB1_LEGACY_CTRL); val = readl(base + USB_SUSP_CTRL); val &= ~USB_SUSP_SET; writel(val, base + USB_SUSP_CTRL); } utmi_phy_clk_enable(phy); if 
(phy->soc_config->requires_usbmode_setup) { val = readl(base + USB_USBMODE); val &= ~USB_USBMODE_MASK; if (phy->mode == USB_DR_MODE_HOST) val |= USB_USBMODE_HOST; else val |= USB_USBMODE_DEVICE; writel(val, base + USB_USBMODE); } if (!phy->is_legacy_phy) set_pts(phy, 0); return 0; } static int utmi_phy_power_off(struct tegra_usb_phy *phy) { unsigned long val; void __iomem *base = phy->regs; utmi_phy_clk_disable(phy); if (phy->mode == USB_DR_MODE_PERIPHERAL) { val = readl(base + USB_SUSP_CTRL); val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0); val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5); writel(val, base + USB_SUSP_CTRL); } val = readl(base + USB_SUSP_CTRL); val |= UTMIP_RESET; writel(val, base + USB_SUSP_CTRL); val = readl(base + UTMIP_BAT_CHRG_CFG0); val |= UTMIP_PD_CHRG; writel(val, base + UTMIP_BAT_CHRG_CFG0); val = readl(base + UTMIP_XCVR_CFG0); val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN | UTMIP_FORCE_PDZI_POWERDOWN; writel(val, base + UTMIP_XCVR_CFG0); val = readl(base + UTMIP_XCVR_CFG1); val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN | UTMIP_FORCE_PDDR_POWERDOWN; writel(val, base + UTMIP_XCVR_CFG1); return utmip_pad_power_off(phy); } static void utmi_phy_preresume(struct tegra_usb_phy *phy) { unsigned long val; void __iomem *base = phy->regs; val = readl(base + UTMIP_TX_CFG0); val |= UTMIP_HS_DISCON_DISABLE; writel(val, base + UTMIP_TX_CFG0); } static void utmi_phy_postresume(struct tegra_usb_phy *phy) { unsigned long val; void __iomem *base = phy->regs; val = readl(base + UTMIP_TX_CFG0); val &= ~UTMIP_HS_DISCON_DISABLE; writel(val, base + UTMIP_TX_CFG0); } static void utmi_phy_restore_start(struct tegra_usb_phy *phy, enum tegra_usb_phy_port_speed port_speed) { unsigned long val; void __iomem *base = phy->regs; val = readl(base + UTMIP_MISC_CFG0); val &= ~UTMIP_DPDM_OBSERVE_SEL(~0); if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) val |= UTMIP_DPDM_OBSERVE_SEL_FS_K; else val |= UTMIP_DPDM_OBSERVE_SEL_FS_J; 
writel(val, base + UTMIP_MISC_CFG0); udelay(1); val = readl(base + UTMIP_MISC_CFG0); val |= UTMIP_DPDM_OBSERVE; writel(val, base + UTMIP_MISC_CFG0); udelay(10); } static void utmi_phy_restore_end(struct tegra_usb_phy *phy) { unsigned long val; void __iomem *base = phy->regs; val = readl(base + UTMIP_MISC_CFG0); val &= ~UTMIP_DPDM_OBSERVE; writel(val, base + UTMIP_MISC_CFG0); udelay(10); } static int ulpi_phy_power_on(struct tegra_usb_phy *phy) { int ret; unsigned long val; void __iomem *base = phy->regs; ret = gpio_direction_output(phy->reset_gpio, 0); if (ret < 0) { dev_err(phy->u_phy.dev, "gpio %d not set to 0\n", phy->reset_gpio); return ret; } msleep(5); ret = gpio_direction_output(phy->reset_gpio, 1); if (ret < 0) { dev_err(phy->u_phy.dev, "gpio %d not set to 1\n", phy->reset_gpio); return ret; } clk_prepare_enable(phy->clk); msleep(1); val = readl(base + USB_SUSP_CTRL); val |= UHSIC_RESET; writel(val, base + USB_SUSP_CTRL); val = readl(base + ULPI_TIMING_CTRL_0); val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP; writel(val, base + ULPI_TIMING_CTRL_0); val = readl(base + USB_SUSP_CTRL); val |= ULPI_PHY_ENABLE; writel(val, base + USB_SUSP_CTRL); val = 0; writel(val, base + ULPI_TIMING_CTRL_1); val |= ULPI_DATA_TRIMMER_SEL(4); val |= ULPI_STPDIRNXT_TRIMMER_SEL(4); val |= ULPI_DIR_TRIMMER_SEL(4); writel(val, base + ULPI_TIMING_CTRL_1); udelay(10); val |= ULPI_DATA_TRIMMER_LOAD; val |= ULPI_STPDIRNXT_TRIMMER_LOAD; val |= ULPI_DIR_TRIMMER_LOAD; writel(val, base + ULPI_TIMING_CTRL_1); /* Fix VbusInvalid due to floating VBUS */ ret = usb_phy_io_write(phy->ulpi, 0x40, 0x08); if (ret) { pr_err("%s: ulpi write failed\n", __func__); return ret; } ret = usb_phy_io_write(phy->ulpi, 0x80, 0x0B); if (ret) { pr_err("%s: ulpi write failed\n", __func__); return ret; } val = readl(base + USB_SUSP_CTRL); val |= USB_SUSP_CLR; writel(val, base + USB_SUSP_CTRL); udelay(100); val = readl(base + USB_SUSP_CTRL); val &= ~USB_SUSP_CLR; writel(val, base + USB_SUSP_CTRL); return 0; } 
static int ulpi_phy_power_off(struct tegra_usb_phy *phy) { clk_disable(phy->clk); return gpio_direction_output(phy->reset_gpio, 0); } static void tegra_usb_phy_close(struct tegra_usb_phy *phy) { if (!IS_ERR(phy->vbus)) regulator_disable(phy->vbus); clk_disable_unprepare(phy->pll_u); } static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy) { if (phy->is_ulpi_phy) return ulpi_phy_power_on(phy); else return utmi_phy_power_on(phy); } static int tegra_usb_phy_power_off(struct tegra_usb_phy *phy) { if (phy->is_ulpi_phy) return ulpi_phy_power_off(phy); else return utmi_phy_power_off(phy); } static int tegra_usb_phy_suspend(struct usb_phy *x, int suspend) { struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy); if (suspend) return tegra_usb_phy_power_off(phy); else return tegra_usb_phy_power_on(phy); } static int ulpi_open(struct tegra_usb_phy *phy) { int err; phy->clk = devm_clk_get(phy->u_phy.dev, "ulpi-link"); if (IS_ERR(phy->clk)) { pr_err("%s: can't get ulpi clock\n", __func__); return PTR_ERR(phy->clk); } err = devm_gpio_request(phy->u_phy.dev, phy->reset_gpio, "ulpi_phy_reset_b"); if (err < 0) { dev_err(phy->u_phy.dev, "request failed for gpio: %d\n", phy->reset_gpio); return err; } err = gpio_direction_output(phy->reset_gpio, 0); if (err < 0) { dev_err(phy->u_phy.dev, "gpio %d direction not set to output\n", phy->reset_gpio); return err; } phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0); if (!phy->ulpi) { dev_err(phy->u_phy.dev, "otg_ulpi_create returned NULL\n"); err = -ENOMEM; return err; } phy->ulpi->io_priv = phy->regs + ULPI_VIEWPORT; return 0; } static int tegra_usb_phy_init(struct tegra_usb_phy *phy) { unsigned long parent_rate; int i; int err; phy->pll_u = devm_clk_get(phy->u_phy.dev, "pll_u"); if (IS_ERR(phy->pll_u)) { pr_err("Can't get pll_u clock\n"); return PTR_ERR(phy->pll_u); } err = clk_prepare_enable(phy->pll_u); if (err) return err; parent_rate = clk_get_rate(clk_get_parent(phy->pll_u)); for (i = 0; i < 
ARRAY_SIZE(tegra_freq_table); i++) { if (tegra_freq_table[i].freq == parent_rate) { phy->freq = &tegra_freq_table[i]; break; } } if (!phy->freq) { pr_err("invalid pll_u parent rate %ld\n", parent_rate); err = -EINVAL; goto fail; } if (!IS_ERR(phy->vbus)) { err = regulator_enable(phy->vbus); if (err) { dev_err(phy->u_phy.dev, "failed to enable usb vbus regulator: %d\n", err); goto fail; } } if (phy->is_ulpi_phy) err = ulpi_open(phy); else err = utmip_pad_open(phy); if (err < 0) goto fail; return 0; fail: clk_disable_unprepare(phy->pll_u); return err; } void tegra_usb_phy_preresume(struct usb_phy *x) { struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy); if (!phy->is_ulpi_phy) utmi_phy_preresume(phy); } EXPORT_SYMBOL_GPL(tegra_usb_phy_preresume); void tegra_usb_phy_postresume(struct usb_phy *x) { struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy); if (!phy->is_ulpi_phy) utmi_phy_postresume(phy); } EXPORT_SYMBOL_GPL(tegra_usb_phy_postresume); void tegra_ehci_phy_restore_start(struct usb_phy *x, enum tegra_usb_phy_port_speed port_speed) { struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy); if (!phy->is_ulpi_phy) utmi_phy_restore_start(phy, port_speed); } EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_start); void tegra_ehci_phy_restore_end(struct usb_phy *x) { struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy); if (!phy->is_ulpi_phy) utmi_phy_restore_end(phy); } EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_end); static int read_utmi_param(struct platform_device *pdev, const char *param, u8 *dest) { u32 value; int err = of_property_read_u32(pdev->dev.of_node, param, &value); *dest = (u8)value; if (err < 0) dev_err(&pdev->dev, "Failed to read USB UTMI parameter %s: %d\n", param, err); return err; } static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, struct platform_device *pdev) { struct resource *res; int err; struct tegra_utmip_config *config; tegra_phy->is_ulpi_phy = false; res = 
platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(&pdev->dev, "Failed to get UTMI Pad regs\n"); return -ENXIO; } tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!tegra_phy->pad_regs) { dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n"); return -ENOMEM; } tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); if (!tegra_phy->config) return -ENOMEM; config = tegra_phy->config; err = read_utmi_param(pdev, "nvidia,hssync-start-delay", &config->hssync_start_delay); if (err < 0) return err; err = read_utmi_param(pdev, "nvidia,elastic-limit", &config->elastic_limit); if (err < 0) return err; err = read_utmi_param(pdev, "nvidia,idle-wait-delay", &config->idle_wait_delay); if (err < 0) return err; err = read_utmi_param(pdev, "nvidia,term-range-adj", &config->term_range_adj); if (err < 0) return err; err = read_utmi_param(pdev, "nvidia,xcvr-lsfslew", &config->xcvr_lsfslew); if (err < 0) return err; err = read_utmi_param(pdev, "nvidia,xcvr-lsrslew", &config->xcvr_lsrslew); if (err < 0) return err; if (tegra_phy->soc_config->requires_extra_tuning_parameters) { err = read_utmi_param(pdev, "nvidia,xcvr-hsslew", &config->xcvr_hsslew); if (err < 0) return err; err = read_utmi_param(pdev, "nvidia,hssquelch-level", &config->hssquelch_level); if (err < 0) return err; err = read_utmi_param(pdev, "nvidia,hsdiscon-level", &config->hsdiscon_level); if (err < 0) return err; } config->xcvr_setup_use_fuses = of_property_read_bool( pdev->dev.of_node, "nvidia,xcvr-setup-use-fuses"); if (!config->xcvr_setup_use_fuses) { err = read_utmi_param(pdev, "nvidia,xcvr-setup", &config->xcvr_setup); if (err < 0) return err; } return 0; } static const struct tegra_phy_soc_config tegra20_soc_config = { .utmi_pll_config_in_car_module = false, .has_hostpc = false, .requires_usbmode_setup = false, .requires_extra_tuning_parameters = false, }; static const struct tegra_phy_soc_config tegra30_soc_config = { 
.utmi_pll_config_in_car_module = true, .has_hostpc = true, .requires_usbmode_setup = true, .requires_extra_tuning_parameters = true, }; static const struct of_device_id tegra_usb_phy_id_table[] = { { .compatible = "nvidia,tegra30-usb-phy", .data = &tegra30_soc_config }, { .compatible = "nvidia,tegra20-usb-phy", .data = &tegra20_soc_config }, { }, }; MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table); static int tegra_usb_phy_probe(struct platform_device *pdev) { const struct of_device_id *match; struct resource *res; struct tegra_usb_phy *tegra_phy = NULL; struct device_node *np = pdev->dev.of_node; enum usb_phy_interface phy_type; int err; tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL); if (!tegra_phy) return -ENOMEM; match = of_match_device(tegra_usb_phy_id_table, &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } tegra_phy->soc_config = match->data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Failed to get I/O memory\n"); return -ENXIO; } tegra_phy->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!tegra_phy->regs) { dev_err(&pdev->dev, "Failed to remap I/O memory\n"); return -ENOMEM; } tegra_phy->is_legacy_phy = of_property_read_bool(np, "nvidia,has-legacy-mode"); phy_type = of_usb_get_phy_mode(np); switch (phy_type) { case USBPHY_INTERFACE_MODE_UTMI: err = utmi_phy_probe(tegra_phy, pdev); if (err < 0) return err; break; case USBPHY_INTERFACE_MODE_ULPI: tegra_phy->is_ulpi_phy = true; tegra_phy->reset_gpio = of_get_named_gpio(np, "nvidia,phy-reset-gpio", 0); if (!gpio_is_valid(tegra_phy->reset_gpio)) { dev_err(&pdev->dev, "invalid gpio: %d\n", tegra_phy->reset_gpio); return tegra_phy->reset_gpio; } tegra_phy->config = NULL; break; default: dev_err(&pdev->dev, "phy_type is invalid or unsupported\n"); return -EINVAL; } if (of_find_property(np, "dr_mode", NULL)) tegra_phy->mode = of_usb_get_dr_mode(np); else tegra_phy->mode = 
USB_DR_MODE_HOST; if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) { dev_err(&pdev->dev, "dr_mode is invalid\n"); return -EINVAL; } /* On some boards, the VBUS regulator doesn't need to be controlled */ if (of_find_property(np, "vbus-supply", NULL)) { tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus"); if (IS_ERR(tegra_phy->vbus)) return PTR_ERR(tegra_phy->vbus); } else { dev_notice(&pdev->dev, "no vbus regulator"); tegra_phy->vbus = ERR_PTR(-ENODEV); } tegra_phy->u_phy.dev = &pdev->dev; err = tegra_usb_phy_init(tegra_phy); if (err < 0) return err; tegra_phy->u_phy.set_suspend = tegra_usb_phy_suspend; platform_set_drvdata(pdev, tegra_phy); err = usb_add_phy_dev(&tegra_phy->u_phy); if (err < 0) { tegra_usb_phy_close(tegra_phy); return err; } return 0; } static int tegra_usb_phy_remove(struct platform_device *pdev) { struct tegra_usb_phy *tegra_phy = platform_get_drvdata(pdev); usb_remove_phy(&tegra_phy->u_phy); tegra_usb_phy_close(tegra_phy); return 0; } static struct platform_driver tegra_usb_phy_driver = { .probe = tegra_usb_phy_probe, .remove = tegra_usb_phy_remove, .driver = { .name = "tegra-phy", .of_match_table = tegra_usb_phy_id_table, }, }; module_platform_driver(tegra_usb_phy_driver); MODULE_DESCRIPTION("Tegra USB PHY driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
raininja/android_kernel_asus_a500cg
arch/arm/mach-s3c24xx/cpufreq-s3c2412.c
2105
6174
/*
 * Copyright 2008 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * S3C2412 CPU Frequency scalling
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <mach/regs-clock.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/cpu-freq-core.h>

#include "s3c2412.h"

/* our clock resources. */
static struct clk *xtal;
static struct clk *fclk;
static struct clk *hclk;
static struct clk *armclk;

/* HDIV: 1, 2, 3, 4, 6, 8 */

/*
 * Work out the ARM/HCLK/PCLK divisors and DVS setting needed to hit the
 * frequencies requested in @cfg, clamping HCLK to both the board maximum
 * and to the achievable ARM clock.  Returns 0 on success or -EINVAL when
 * no valid divisor combination exists for the request.
 */
static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
{
	unsigned int hdiv, pdiv, armdiv, dvs;
	unsigned long hclk, fclk, armclk, armdiv_clk;
	unsigned long hclk_max;

	fclk = cfg->freq.fclk;
	armclk = cfg->freq.armclk;
	hclk_max = cfg->max.hclk;

	/* We can't run hclk above armclk as at the best we have to
	 * have armclk and hclk in dvs mode. */

	if (hclk_max > armclk)
		hclk_max = armclk;

	s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
		     __func__, fclk, armclk, hclk_max);
	s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
		     __func__, cfg->freq.fclk, cfg->freq.armclk,
		     cfg->freq.hclk, cfg->freq.pclk);

	/* armdiv is restricted to 1 or 2 on this SoC. */
	armdiv = fclk / armclk;

	if (armdiv < 1)
		armdiv = 1;
	if (armdiv > 2)
		armdiv = 2;

	cfg->divs.arm_divisor = armdiv;
	armdiv_clk = fclk / armdiv;

	hdiv = armdiv_clk / hclk_max;
	if (hdiv < 1)
		hdiv = 1;

	cfg->freq.hclk = hclk = armdiv_clk / hdiv;

	/* set dvs depending on whether we reached armclk or not. */
	cfg->divs.dvs = dvs = armclk < armdiv_clk;

	/* update the actual armclk we achieved. */
	cfg->freq.armclk = dvs ? hclk : armdiv_clk;

	s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
		     __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);

	if (hdiv > 4)
		goto invalid;

	/* pclk is hclk divided by 1 or 2, and must not exceed the board max. */
	pdiv = (hclk > cfg->max.pclk) ? 2 : 1;

	if ((hclk / pdiv) > cfg->max.pclk)
		pdiv++;

	cfg->freq.pclk = hclk / pdiv;

	s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);

	if (pdiv > 2)
		goto invalid;

	pdiv *= hdiv;

	/* store the result, and then return */
	cfg->divs.h_divisor = hdiv * armdiv;
	cfg->divs.p_divisor = pdiv * armdiv;

	return 0;

invalid:
	return -EINVAL;
}

/*
 * Program the divisors calculated by s3c2412_cpufreq_calcdivs() into
 * S3C2410_CLKDIVN and re-parent armclk for DVS mode if requested.
 */
static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
{
	unsigned long clkdiv;
	unsigned long olddiv;

	olddiv = clkdiv = __raw_readl(S3C2410_CLKDIVN);

	/* clear off current clock info */

	clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
	clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
	clkdiv &= ~S3C2412_CLKDIVN_PDIVN;

	if (cfg->divs.arm_divisor == 2)
		clkdiv |= S3C2412_CLKDIVN_ARMDIVN;

	clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);

	if (cfg->divs.p_divisor != cfg->divs.h_divisor)
		clkdiv |= S3C2412_CLKDIVN_PDIVN;

	s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
	__raw_writel(clkdiv, S3C2410_CLKDIVN);

	/* in DVS mode armclk runs from hclk, otherwise directly from fclk */
	clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
}

/*
 * Recalculate and program the SDRAM refresh counter for the new HCLK
 * frequency, using the board-supplied refresh period (in ns).
 */
static void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
{
	struct s3c_cpufreq_board *board = cfg->board;
	unsigned long refresh;

	s3c_freq_dbg("%s: refresh %u ns, hclk %lu\n", __func__,
		     board->refresh, cfg->freq.hclk);

	/* Reduce both the refresh time (in ns) and the frequency (in MHz)
	 * by 10 each to ensure that we do not overflow 32 bit numbers. This
	 * should work for HCLK up to 133MHz and refresh period up to 30usec.
	 */

	refresh = (board->refresh / 10);
	refresh *= (cfg->freq.hclk / 100);
	refresh /= (1 * 1000 * 1000);	/* 10^6 */

	s3c_freq_dbg("%s: setting refresh 0x%08lx\n", __func__, refresh);
	__raw_writel(refresh, S3C2412_REFRESH);
}

/* set the default cpu frequency information, based on an 200MHz part
 * as we have no other way of detecting the speed rating in software.
 */

static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
	.max		= {
		.fclk	= 200000000,
		.hclk	= 100000000,
		.pclk	=  50000000,
	},

	.latency	= 5000000, /* 5ms */

	.locktime_m	= 150,
	.locktime_u	= 150,
	.locktime_bits	= 16,

	.name		= "s3c2412",
	.set_refresh	= s3c2412_cpufreq_setrefresh,
	.set_divs	= s3c2412_cpufreq_setdivs,
	.calc_divs	= s3c2412_cpufreq_calcdivs,

	.calc_iotiming	= s3c2412_iotiming_calc,
	.set_iotiming	= s3c2412_iotiming_set,
	.get_iotiming	= s3c2412_iotiming_get,

	.resume_clocks	= s3c2412_setup_clocks,

	.debug_io_show  = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
};

/*
 * subsys_interface add callback: look up the clocks we need, bump the
 * frequency limits for >200MHz (assumed 266MHz-capable) parts, then
 * register with the s3c cpufreq core.  On failure the clocks acquired
 * so far are released in reverse order via the error labels.
 */
static int s3c2412_cpufreq_add(struct device *dev,
			       struct subsys_interface *sif)
{
	unsigned long fclk_rate;

	hclk = clk_get(NULL, "hclk");
	if (IS_ERR(hclk)) {
		printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
		return -ENOENT;
	}

	fclk = clk_get(NULL, "fclk");
	if (IS_ERR(fclk)) {
		printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
		goto err_fclk;
	}

	fclk_rate = clk_get_rate(fclk);
	if (fclk_rate > 200000000) {
		printk(KERN_INFO
		       "%s: fclk %ld MHz, assuming 266MHz capable part\n",
		       __func__, fclk_rate / 1000000);
		s3c2412_cpufreq_info.max.fclk = 266000000;
		s3c2412_cpufreq_info.max.hclk = 133000000;
		s3c2412_cpufreq_info.max.pclk =  66000000;
	}

	armclk = clk_get(NULL, "armclk");
	if (IS_ERR(armclk)) {
		printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
		goto err_armclk;
	}

	xtal = clk_get(NULL, "xtal");
	if (IS_ERR(xtal)) {
		printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
		goto err_xtal;
	}

	return s3c_cpufreq_register(&s3c2412_cpufreq_info);

err_xtal:
	clk_put(armclk);
err_armclk:
	clk_put(fclk);
err_fclk:
	clk_put(hclk);

	return -ENOENT;
}

static struct subsys_interface s3c2412_cpufreq_interface = {
	.name		= "s3c2412_cpufreq",
	.subsys		= &s3c2412_subsys,
	.add_dev	= s3c2412_cpufreq_add,
};

/* Hook the driver up once the s3c2412 subsystem exists. */
static int s3c2412_cpufreq_init(void)
{
	return subsys_interface_register(&s3c2412_cpufreq_interface);
}
arch_initcall(s3c2412_cpufreq_init);
gpl-2.0
loansindi/linux
arch/powerpc/platforms/512x/mpc5121_ads.c
2105
1721
/* * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. All rights reserved. * * Author: John Rigby, <jrigby@freescale.com>, Thur Mar 29 2007 * * Description: * MPC5121 ADS board setup * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/prom.h> #include <asm/time.h> #include <sysdev/fsl_pci.h> #include "mpc512x.h" #include "mpc5121_ads.h" static void __init mpc5121_ads_setup_arch(void) { #ifdef CONFIG_PCI struct device_node *np; #endif printk(KERN_INFO "MPC5121 ADS board from Freescale Semiconductor\n"); /* * cpld regs are needed early */ mpc5121_ads_cpld_map(); #ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "fsl,mpc5121-pci") mpc83xx_add_bridge(np); #endif mpc512x_setup_arch(); } static void __init mpc5121_ads_init_IRQ(void) { mpc512x_init_IRQ(); mpc5121_ads_cpld_pic_init(); } /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc5121_ads_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,mpc5121ads"); } define_machine(mpc5121_ads) { .name = "MPC5121 ADS", .probe = mpc5121_ads_probe, .setup_arch = mpc5121_ads_setup_arch, .init = mpc512x_init, .init_early = mpc512x_init_early, .init_IRQ = mpc5121_ads_init_IRQ, .get_irq = ipic_get_irq, .calibrate_decr = generic_calibrate_decr, .restart = mpc512x_restart, };
gpl-2.0
android-armv7a-belalang-tempur/Android_SpeedKernel
arch/arm/plat-mxc/gpio.c
2361
10217
/*
 * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
 *
 * Based on code from Freescale,
 * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <mach/hardware.h>
#include <asm-generic/bug.h>

static struct mxc_gpio_port *mxc_gpio_ports;
static int gpio_table_size;

#define cpu_is_mx1_mx2()	(cpu_is_mx1() || cpu_is_mx2())

/* Register offsets and IRQ trigger encodings differ between the
 * MX1/MX2 and the later (MX3+) GPIO blocks; select at runtime. */
#define GPIO_DR		(cpu_is_mx1_mx2() ? 0x1c : 0x00)
#define GPIO_GDIR	(cpu_is_mx1_mx2() ? 0x00 : 0x04)
#define GPIO_PSR	(cpu_is_mx1_mx2() ? 0x24 : 0x08)
#define GPIO_ICR1	(cpu_is_mx1_mx2() ? 0x28 : 0x0C)
#define GPIO_ICR2	(cpu_is_mx1_mx2() ? 0x2C : 0x10)
#define GPIO_IMR	(cpu_is_mx1_mx2() ? 0x30 : 0x14)
#define GPIO_ISR	(cpu_is_mx1_mx2() ? 0x34 : 0x18)

#define GPIO_INT_LOW_LEV	(cpu_is_mx1_mx2() ? 0x3 : 0x0)
#define GPIO_INT_HIGH_LEV	(cpu_is_mx1_mx2() ? 0x2 : 0x1)
#define GPIO_INT_RISE_EDGE	(cpu_is_mx1_mx2() ? 0x0 : 0x2)
#define GPIO_INT_FALL_EDGE	(cpu_is_mx1_mx2() ? 0x1 : 0x3)
#define GPIO_INT_NONE		0x4

/* Note: This driver assumes 32 GPIOs are handled in one register */

/* Ack (clear) the interrupt status bit for one GPIO line (write-1-to-clear). */
static void _clear_gpio_irqstatus(struct mxc_gpio_port *port, u32 index)
{
	__raw_writel(1 << index, port->base + GPIO_ISR);
}

/* Enable/disable the interrupt mask bit for one GPIO line. */
static void _set_gpio_irqenable(struct mxc_gpio_port *port, u32 index,
				int enable)
{
	u32 l;

	l = __raw_readl(port->base + GPIO_IMR);
	l = (l & (~(1 << index))) | (!!enable << index);
	__raw_writel(l, port->base + GPIO_IMR);
}

/* irq_chip .irq_ack: clear the pending status of this GPIO irq. */
static void gpio_ack_irq(struct irq_data *d)
{
	u32 gpio = irq_to_gpio(d->irq);
	_clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f);
}

/* irq_chip .irq_mask: disable this GPIO irq. */
static void gpio_mask_irq(struct irq_data *d)
{
	u32 gpio = irq_to_gpio(d->irq);
	_set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0);
}

/* irq_chip .irq_unmask: enable this GPIO irq. */
static void gpio_unmask_irq(struct irq_data *d)
{
	u32 gpio = irq_to_gpio(d->irq);
	_set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1);
}

static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset);

/*
 * irq_chip .irq_set_type: program the ICR trigger mode for one GPIO line.
 * Both-edge triggering is emulated: the hardware is armed as a level
 * interrupt on the opposite of the current pin state, and the handler
 * flips it on each event (see mxc_flip_edge()).
 */
static int gpio_set_irq_type(struct irq_data *d, u32 type)
{
	u32 gpio = irq_to_gpio(d->irq);
	struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
	u32 bit, val;
	int edge;
	void __iomem *reg = port->base;

	port->both_edges &= ~(1 << (gpio & 31));
	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		edge = GPIO_INT_RISE_EDGE;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		edge = GPIO_INT_FALL_EDGE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		val = mxc_gpio_get(&port->chip, gpio & 31);
		if (val) {
			edge = GPIO_INT_LOW_LEV;
			pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
		} else {
			edge = GPIO_INT_HIGH_LEV;
			pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
		}
		port->both_edges |= 1 << (gpio & 31);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		edge = GPIO_INT_LOW_LEV;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		edge = GPIO_INT_HIGH_LEV;
		break;
	default:
		return -EINVAL;
	}

	/* two ICR registers, 2 bits per GPIO: ICR1 for 0-15, ICR2 for 16-31 */
	reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
	bit = gpio & 0xf;
	val = __raw_readl(reg) & ~(0x3 << (bit << 1));
	__raw_writel(val | (edge << (bit << 1)), reg);
	_clear_gpio_irqstatus(port, gpio & 0x1f);

	return 0;
}

/*
 * Flip the level trigger of a both-edge-emulated GPIO line to the
 * opposite polarity so the next transition is caught as well.
 */
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
{
	void __iomem *reg = port->base;
	u32 bit, val;
	int edge;

	reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
	bit = gpio & 0xf;
	val = __raw_readl(reg);
	edge = (val >> (bit << 1)) & 3;
	val &= ~(0x3 << (bit << 1));
	if (edge == GPIO_INT_HIGH_LEV) {
		edge = GPIO_INT_LOW_LEV;
		pr_debug("mxc: switch GPIO %d to low trigger\n", gpio);
	} else if (edge == GPIO_INT_LOW_LEV) {
		edge = GPIO_INT_HIGH_LEV;
		pr_debug("mxc: switch GPIO %d to high trigger\n", gpio);
	} else {
		pr_err("mxc: invalid configuration for GPIO %d: %x\n",
		       gpio, edge);
		return;
	}
	__raw_writel(val | (edge << (bit << 1)), reg);
}

/* handle 32 interrupts in one status register */
static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
{
	u32 gpio_irq_no_base = port->virtual_irq_start;

	while (irq_stat != 0) {
		int irqoffset = fls(irq_stat) - 1;

		if (port->both_edges & (1 << irqoffset))
			mxc_flip_edge(port, irqoffset);

		generic_handle_irq(gpio_irq_no_base + irqoffset);

		irq_stat &= ~(1 << irqoffset);
	}
}

/* MX1 and MX3 has one interrupt *per* gpio port */
static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
	u32 irq_stat;
	struct mxc_gpio_port *port = irq_get_handler_data(irq);

	/* only demux lines that are both pending and unmasked */
	irq_stat = __raw_readl(port->base + GPIO_ISR) &
			__raw_readl(port->base + GPIO_IMR);

	mxc_gpio_irq_handler(port, irq_stat);
}

/* MX2 has one interrupt *for all* gpio ports */
static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
	int i;
	u32 irq_msk, irq_stat;
	struct mxc_gpio_port *port = irq_get_handler_data(irq);

	/* walk through all interrupt status registers */
	for (i = 0; i < gpio_table_size; i++) {
		irq_msk = __raw_readl(port[i].base + GPIO_IMR);
		if (!irq_msk)
			continue;

		irq_stat = __raw_readl(port[i].base + GPIO_ISR) & irq_msk;
		if (irq_stat)
			mxc_gpio_irq_handler(&port[i], irq_stat);
	}
}

/*
 * Set interrupt number "irq" in the GPIO as a wake-up source.
 * While system is running, all registered GPIO interrupts need to have
 * wake-up enabled. When system is suspended, only selected GPIO interrupts
 * need to have wake-up enabled.
 * @param  irq          interrupt source number
 * @param  enable       enable as wake-up if equal to non-zero
 * @return       This function returns 0 on success.
 */
static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
{
	u32 gpio = irq_to_gpio(d->irq);
	u32 gpio_idx = gpio & 0x1F;
	struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];

	if (enable) {
		/* lines 16-31 may route to a separate parent irq (irq_high) */
		if (port->irq_high && (gpio_idx >= 16))
			enable_irq_wake(port->irq_high);
		else
			enable_irq_wake(port->irq);
	} else {
		if (port->irq_high && (gpio_idx >= 16))
			disable_irq_wake(port->irq_high);
		else
			disable_irq_wake(port->irq);
	}

	return 0;
}

static struct irq_chip gpio_irq_chip = {
	.name = "GPIO",
	.irq_ack = gpio_ack_irq,
	.irq_mask = gpio_mask_irq,
	.irq_unmask = gpio_unmask_irq,
	.irq_set_type = gpio_set_irq_type,
	.irq_set_wake = gpio_set_wake_irq,
};

/* Set the GDIR bit for one line: dir != 0 means output, 0 means input.
 * Uses the per-port spinlock for the read-modify-write. */
static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
				int dir)
{
	struct mxc_gpio_port *port =
		container_of(chip, struct mxc_gpio_port, chip);
	u32 l;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	l = __raw_readl(port->base + GPIO_GDIR);
	if (dir)
		l |= 1 << offset;
	else
		l &= ~(1 << offset);
	__raw_writel(l, port->base + GPIO_GDIR);
	spin_unlock_irqrestore(&port->lock, flags);
}

/* gpio_chip .set: drive the output data register bit for one line. */
static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct mxc_gpio_port *port =
		container_of(chip, struct mxc_gpio_port, chip);
	void __iomem *reg = port->base + GPIO_DR;
	u32 l;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	l = (__raw_readl(reg) & (~(1 << offset))) | (!!value << offset);
	__raw_writel(l, reg);
	spin_unlock_irqrestore(&port->lock, flags);
}

/* gpio_chip .get: read the pad status register bit for one line. */
static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct mxc_gpio_port *port =
		container_of(chip, struct mxc_gpio_port, chip);

	return (__raw_readl(port->base + GPIO_PSR) >> offset) & 1;
}

static int mxc_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	_set_gpio_direction(chip, offset, 0);
	return 0;
}

/* Set the output value first so the pin does not glitch when the
 * direction flips to output. */
static int mxc_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	mxc_gpio_set(chip, offset, value);
	_set_gpio_direction(chip, offset, 1);
	return 0;
}

/*
 * This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

/*
 * Register @cnt GPIO ports: quiesce each port's interrupts, set up the
 * per-line irq_chip handlers, register the gpio_chip, and chain the
 * port-level parent interrupt handler appropriate for the SoC family.
 */
int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
{
	int i, j;

	/* save for local usage */
	mxc_gpio_ports = port;
	gpio_table_size = cnt;

	printk(KERN_INFO "MXC GPIO hardware\n");

	for (i = 0; i < cnt; i++) {
		/* disable the interrupt and clear the status */
		__raw_writel(0, port[i].base + GPIO_IMR);
		__raw_writel(~0, port[i].base + GPIO_ISR);
		for (j = port[i].virtual_irq_start;
			j < port[i].virtual_irq_start + 32; j++) {
			irq_set_lockdep_class(j, &gpio_lock_class);
			irq_set_chip_and_handler(j, &gpio_irq_chip,
						 handle_level_irq);
			set_irq_flags(j, IRQF_VALID);
		}

		/* register gpio chip */
		port[i].chip.direction_input = mxc_gpio_direction_input;
		port[i].chip.direction_output = mxc_gpio_direction_output;
		port[i].chip.get = mxc_gpio_get;
		port[i].chip.set = mxc_gpio_set;
		port[i].chip.base = i * 32;
		port[i].chip.ngpio = 32;

		spin_lock_init(&port[i].lock);

		/* its a serious configuration bug when it fails */
		BUG_ON( gpiochip_add(&port[i].chip) < 0 );

		if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
			/* setup one handler for each entry */
			irq_set_chained_handler(port[i].irq,
						mx3_gpio_irq_handler);
			irq_set_handler_data(port[i].irq, &port[i]);
			if (port[i].irq_high) {
				/* setup handler for GPIO 16 to 31 */
				irq_set_chained_handler(port[i].irq_high,
							mx3_gpio_irq_handler);
				irq_set_handler_data(port[i].irq_high,
						     &port[i]);
			}
		}
	}

	if (cpu_is_mx2()) {
		/* setup one handler for all GPIO interrupts */
		irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler);
		irq_set_handler_data(port[0].irq, port);
	}

	return 0;
}
gpl-2.0
HeydayGuan/android-kernel-3.0
drivers/gpu/drm/nouveau/nv10_fb.c
2873
3500
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/*
 * Atomically allocate @size bytes from the on-die Z-compression tag
 * heap.  Returns NULL when the heap is exhausted or pre-get fails.
 */
static struct drm_mm_node *
nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct drm_mm_node *mem;
	int ret;

	ret = drm_mm_pre_get(&pfb->tag_heap);
	if (ret)
		return NULL;

	spin_lock(&dev_priv->tile.lock);
	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
	if (mem)
		mem = drm_mm_get_block_atomic(mem, size, 0);
	spin_unlock(&dev_priv->tile.lock);

	return mem;
}

/* Return a tag-heap node allocated by nv20_fb_alloc_tag(). */
static void
nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->tile.lock);
	drm_mm_put_block(mem);
	spin_unlock(&dev_priv->tile.lock);
}

/*
 * Fill in software state for tiling region @i covering [addr, addr+size).
 * On NV20 a ZETA surface additionally gets tag memory for Z compression
 * when available; the low/high bits OR'd into tile->addr are the
 * hardware enable flags for the region.
 */
void
nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
			 uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);

	tile->addr = addr;
	tile->limit = max(1u, addr + size) - 1;
	tile->pitch = pitch;

	if (dev_priv->card_type == NV_20) {
		if (flags & NOUVEAU_GEM_TILE_ZETA) {
			/*
			 * Allocate some of the on-die tag memory,
			 * used to store Z compression meta-data (most
			 * likely just a bitmap determining if a given
			 * tile is compressed or not).
			 */
			tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);

			if (tile->tag_mem) {
				/* Enable Z compression */
				if (dev_priv->chipset >= 0x25)
					tile->zcomp = tile->tag_mem->start |
						(bpp == 16 ?
						 NV25_PFB_ZCOMP_MODE_16 :
						 NV25_PFB_ZCOMP_MODE_32);
				else
					tile->zcomp = tile->tag_mem->start |
						NV20_PFB_ZCOMP_EN |
						(bpp == 16 ? 0 :
						 NV20_PFB_ZCOMP_MODE_32);
			}

			tile->addr |= 3;
		} else {
			tile->addr |= 1;
		}
	} else {
		tile->addr |= 1 << 31;
	}
}

/* Release tag memory (if any) and clear the software tiling state. */
void
nv10_fb_free_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	if (tile->tag_mem) {
		nv20_fb_free_tag(dev, tile->tag_mem);
		tile->tag_mem = NULL;
	}

	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
}

/* Write the cached tiling state for region @i out to the PFB registers. */
void
nv10_fb_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);

	if (dev_priv->card_type == NV_20)
		nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}

/*
 * Engine init: size the NV20 tag heap (NV25+ has 64KiB, else 32KiB)
 * and turn all tiling regions off.
 */
int
nv10_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i;

	pfb->num_tiles = NV10_PFB_TILE__SIZE;

	if (dev_priv->card_type == NV_20)
		drm_mm_init(&pfb->tag_heap, 0,
			    (dev_priv->chipset >= 0x25 ?
			     64 * 1024 : 32 * 1024));

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	return 0;
}

/* Engine teardown: free every tiling region and the NV20 tag heap. */
void
nv10_fb_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i;

	for (i = 0; i < pfb->num_tiles; i++)
		pfb->free_tile_region(dev, i);

	if (dev_priv->card_type == NV_20)
		drm_mm_takedown(&pfb->tag_heap);
}
gpl-2.0
ipaccess/linux-yocto-3.10
drivers/video/omap/lcd_ams_delta.c
3129
5360
/*
 * Based on drivers/video/omap/lcd_inn1510.c
 *
 * LCD panel support for the Amstrad E3 (Delta) videophone.
 *
 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/lcd.h>
#include <linux/gpio.h>

#include <mach/hardware.h>
#include <mach/board-ams-delta.h>

#include "omapfb.h"

#define AMS_DELTA_DEFAULT_CONTRAST	112
#define AMS_DELTA_MAX_CONTRAST		0x00FF
/* state bit kept alongside the contrast value in ams_delta_lcd */
#define AMS_DELTA_LCD_POWER		0x0100


/* LCD class device section */

/* cached state: low byte = contrast, bit 8 = power flag */
static int ams_delta_lcd;

/*
 * lcd_ops .set_power: turn the backlight PWL on (restoring the cached
 * contrast) for FB_BLANK_UNBLANK, otherwise switch it off.  The
 * AMS_DELTA_LCD_POWER bit tracks the current state to avoid redundant
 * register writes.
 */
static int ams_delta_lcd_set_power(struct lcd_device *dev, int power)
{
	if (power == FB_BLANK_UNBLANK) {
		if (!(ams_delta_lcd & AMS_DELTA_LCD_POWER)) {
			omap_writeb(ams_delta_lcd & AMS_DELTA_MAX_CONTRAST,
					OMAP_PWL_ENABLE);
			omap_writeb(1, OMAP_PWL_CLK_ENABLE);
			ams_delta_lcd |= AMS_DELTA_LCD_POWER;
		}
	} else {
		if (ams_delta_lcd & AMS_DELTA_LCD_POWER) {
			omap_writeb(0, OMAP_PWL_ENABLE);
			omap_writeb(0, OMAP_PWL_CLK_ENABLE);
			ams_delta_lcd &= ~AMS_DELTA_LCD_POWER;
		}
	}

	return 0;
}

/* lcd_ops .set_contrast: write and cache a new contrast (0..255). */
static int ams_delta_lcd_set_contrast(struct lcd_device *dev, int value)
{
	if ((value >= 0) && (value <= AMS_DELTA_MAX_CONTRAST)) {
		omap_writeb(value, OMAP_PWL_ENABLE);
		ams_delta_lcd &= ~AMS_DELTA_MAX_CONTRAST;
		ams_delta_lcd |= value;
	}

	return 0;
}

#ifdef CONFIG_LCD_CLASS_DEVICE
static int ams_delta_lcd_get_power(struct lcd_device *dev)
{
	if (ams_delta_lcd & AMS_DELTA_LCD_POWER)
		return FB_BLANK_UNBLANK;
	else
		return FB_BLANK_POWERDOWN;
}

static int ams_delta_lcd_get_contrast(struct lcd_device *dev)
{
	if (!(ams_delta_lcd & AMS_DELTA_LCD_POWER))
		return 0;

	return ams_delta_lcd & AMS_DELTA_MAX_CONTRAST;
}

static struct lcd_ops ams_delta_lcd_ops = {
	.get_power = ams_delta_lcd_get_power,
	.set_power = ams_delta_lcd_set_power,
	.get_contrast = ams_delta_lcd_get_contrast,
	.set_contrast = ams_delta_lcd_set_contrast,
};
#endif


/* omapfb panel section */

/* GPIOs driving the panel: video blanking enable and display enable */
static const struct gpio _gpios[] = {
	{
		.gpio	= AMS_DELTA_GPIO_PIN_LCD_VBLEN,
		.flags	= GPIOF_OUT_INIT_LOW,
		.label	= "lcd_vblen",
	},
	{
		.gpio	= AMS_DELTA_GPIO_PIN_LCD_NDISP,
		.flags	= GPIOF_OUT_INIT_LOW,
		.label	= "lcd_ndisp",
	},
};

static int ams_delta_panel_init(struct lcd_panel *panel,
		struct omapfb_device *fbdev)
{
	return gpio_request_array(_gpios, ARRAY_SIZE(_gpios));
}

static void ams_delta_panel_cleanup(struct lcd_panel *panel)
{
	gpio_free_array(_gpios, ARRAY_SIZE(_gpios));
}

/* Enable display first, then video blanking output. */
static int ams_delta_panel_enable(struct lcd_panel *panel)
{
	gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 1);
	gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 1);
	return 0;
}

/* Disable in the reverse order of ams_delta_panel_enable(). */
static void ams_delta_panel_disable(struct lcd_panel *panel)
{
	gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_VBLEN, 0);
	gpio_set_value(AMS_DELTA_GPIO_PIN_LCD_NDISP, 0);
}

static unsigned long ams_delta_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* fixed panel timings for the 480x320 Amstrad E3 display */
static struct lcd_panel ams_delta_panel = {
	.name		= "ams-delta",
	.config		= 0,

	.bpp		= 12,
	.data_lines	= 16,
	.x_res		= 480,
	.y_res		= 320,
	.pixel_clock	= 4687,
	.hsw		= 3,
	.hfp		= 1,
	.hbp		= 1,
	.vsw		= 1,
	.vfp		= 0,
	.vbp		= 0,
	.pcd		= 0,
	.acb		= 37,

	.init		= ams_delta_panel_init,
	.cleanup	= ams_delta_panel_cleanup,
	.enable		= ams_delta_panel_enable,
	.disable	= ams_delta_panel_disable,
	.get_caps	= ams_delta_panel_get_caps,
};


/* platform driver section */

/*
 * Probe: optionally register the lcd class device, program default
 * contrast and power, and register the omapfb panel.  Note the lcd_ops
 * above ignore their dev argument, so passing a NULL lcd_device when
 * CONFIG_LCD_CLASS_DEVICE is disabled is harmless.
 */
static int ams_delta_panel_probe(struct platform_device *pdev)
{
	struct lcd_device *lcd_device = NULL;
#ifdef CONFIG_LCD_CLASS_DEVICE
	int ret;

	lcd_device = lcd_device_register("omapfb", &pdev->dev, NULL,
						&ams_delta_lcd_ops);

	if (IS_ERR(lcd_device)) {
		ret = PTR_ERR(lcd_device);
		dev_err(&pdev->dev, "failed to register device\n");
		return ret;
	}

	platform_set_drvdata(pdev, lcd_device);
	lcd_device->props.max_contrast = AMS_DELTA_MAX_CONTRAST;
#endif

	ams_delta_lcd_set_contrast(lcd_device, AMS_DELTA_DEFAULT_CONTRAST);
	ams_delta_lcd_set_power(lcd_device, FB_BLANK_UNBLANK);

	omapfb_register_panel(&ams_delta_panel);
	return 0;
}

static int ams_delta_panel_remove(struct platform_device *pdev)
{
	return 0;
}

static int ams_delta_panel_suspend(struct platform_device *pdev,
		pm_message_t mesg)
{
	return 0;
}

static int ams_delta_panel_resume(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver ams_delta_panel_driver = {
	.probe		= ams_delta_panel_probe,
	.remove		= ams_delta_panel_remove,
	.suspend	= ams_delta_panel_suspend,
	.resume		= ams_delta_panel_resume,
	.driver		= {
		.name	= "lcd_ams_delta",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(ams_delta_panel_driver);
gpl-2.0
poondog/kangaroo-m7-mkII
arch/cris/mm/fault.c
3897
8944
/*
 *  arch/cris/mm/fault.c
 *
 *  Copyright (C) 2000-2010  Axis Communications AB
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <arch/system.h>

extern int find_fixup_code(struct pt_regs *);
extern void die_if_kernel(const char *, struct pt_regs *, long);
extern void show_registers(struct pt_regs *regs);

/* debug of low-level TLB reload */
#undef DEBUG

#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif

/* debug of higher-level faults */
#define DPG(x)

/* current active page directory */

DEFINE_PER_CPU(pgd_t *, current_pgd);
unsigned long cris_signal_return_page;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Notice that the address we're given is aligned to the page the fault
 * occurred in, since we only get the PFN in R_MMU_CAUSE not the complete
 * address.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */

asmlinkage void
do_page_fault(unsigned long address, struct pt_regs *regs,
	      int protection, int writeaccess)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	siginfo_t info;
	int fault;

	D(printk(KERN_DEBUG
		 "Page fault for %lX on %X at %lX, prot %d write %d\n",
		 address, smp_processor_id(), instruction_pointer(regs),
		 protection, writeaccess));

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error (error_code & 1).
	 */

	if (address >= VMALLOC_START &&
	    !protection &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* When stack execution is not allowed we store the signal
	 * trampolines in the reserved cris_signal_return_page.
	 * Handle this in the exact same way as vmalloc (we know
	 * that the mapping is there and is valid so no need to
	 * call handle_mm_fault).
	 */
	if (cris_signal_return_page &&
	    address == cris_signal_return_page &&
	    !protection && user_mode(regs))
		goto vmalloc_fault;

	/* we can and should enable interrupts at this point */
	local_irq_enable();

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or "atomic" operation or have no
	 * user context, we must not take the fault.
	 */

	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

 good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
	/* writeaccess: 2 = instruction fetch, 1 = write, 0 = read */

	if (writeaccess == 2){
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (writeaccess == 1) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(mm, vma, address,
				(writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */

 bad_area:
	up_read(&mm->mmap_sem);

 bad_area_nosemaphore:
	DPG(show_registers(regs));

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		printk(KERN_NOTICE "%s (pid %d) segfaults for page "
			"address %08lx at pc %08lx\n",
			tsk->comm, tsk->pid, address, instruction_pointer(regs));

		/* With DPG on, we've already dumped registers above.  */
		DPG(if (0))
			show_registers(regs);

#ifdef CONFIG_NO_SEGFAULT_TERMINATION
		DECLARE_WAIT_QUEUE_HEAD(wq);
		wait_event_interruptible(wq, 0 == 1);
#else
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
#endif
		return;
	}

 no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */

	if (find_fixup_code(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	if (!oops_in_progress) {
		oops_in_progress = 1;
		if ((unsigned long) (address) < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
				"pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel access"
				" at virtual address %08lx\n", address);

		die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
		oops_in_progress = 0;
	}

	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

 out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}

/* Find fixup code. */
/*
 * Look up the faulting instruction in the exception table; when found,
 * redirect the saved instruction pointer to the fixup stub and let the
 * arch hook adjust any remaining state.  Returns 1 if a fixup was
 * applied, 0 otherwise.
 */
int
find_fixup_code(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	/* in case of delay slot fault (v32) */
	unsigned long ip = (instruction_pointer(regs) & ~0x1);

	fixup = search_exception_tables(ip);
	if (fixup != 0) {
		/* Adjust the instruction pointer in the stackframe. */
		instruction_pointer(regs) = fixup->fixup;
		arch_fixup(regs);
		return 1;
	}

	return 0;
}
gpl-2.0
V4MSHI/S7562_kernel
drivers/leds/leds-lp3944.c
4153
11398
/* * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip * * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ /* * I2C driver for National Semiconductor LP3944 Funlight Chip * http://www.national.com/pf/LP/LP3944.html * * This helper chip can drive up to 8 leds, with two programmable DIM modes; * it could even be used as a gpio expander but this driver assumes it is used * as a led controller. * * The DIM modes are used to set _blink_ patterns for leds, the pattern is * specified supplying two parameters: * - period: from 0s to 1.6s * - duty cycle: percentage of the period the led is on, from 0 to 100 * * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb * leds, the camera flash light and the displays backlights. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/leds-lp3944.h> /* Read Only Registers */ #define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */ #define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */ #define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */ #define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */ #define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */ #define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */ #define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */ #define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */ /* These registers are not used to control leds in LP3944, they can store * arbitrary values which the chip will ignore. 
*/ #define LP3944_REG_REGISTER8 0x08 #define LP3944_REG_REGISTER9 0x09 #define LP3944_DIM0 0 #define LP3944_DIM1 1 /* period in ms */ #define LP3944_PERIOD_MIN 0 #define LP3944_PERIOD_MAX 1600 /* duty cycle is a percentage */ #define LP3944_DUTY_CYCLE_MIN 0 #define LP3944_DUTY_CYCLE_MAX 100 #define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev) /* Saved data */ struct lp3944_led_data { u8 id; enum lp3944_type type; enum lp3944_status status; struct led_classdev ldev; struct i2c_client *client; struct work_struct work; }; struct lp3944_data { struct mutex lock; struct i2c_client *client; struct lp3944_led_data leds[LP3944_LEDS_MAX]; }; static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value) { int tmp; tmp = i2c_smbus_read_byte_data(client, reg); if (tmp < 0) return -EINVAL; *value = tmp; return 0; } static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value) { return i2c_smbus_write_byte_data(client, reg, value); } /** * Set the period for DIM status * * @client: the i2c client * @dim: either LP3944_DIM0 or LP3944_DIM1 * @period: period of a blink, that is a on/off cycle, expressed in ms. 
*/ static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period) { u8 psc_reg; u8 psc_value; int err; if (dim == LP3944_DIM0) psc_reg = LP3944_REG_PSC0; else if (dim == LP3944_DIM1) psc_reg = LP3944_REG_PSC1; else return -EINVAL; /* Convert period to Prescaler value */ if (period > LP3944_PERIOD_MAX) return -EINVAL; psc_value = (period * 255) / LP3944_PERIOD_MAX; err = lp3944_reg_write(client, psc_reg, psc_value); return err; } /** * Set the duty cycle for DIM status * * @client: the i2c client * @dim: either LP3944_DIM0 or LP3944_DIM1 * @duty_cycle: percentage of a period during which a led is ON */ static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim, u8 duty_cycle) { u8 pwm_reg; u8 pwm_value; int err; if (dim == LP3944_DIM0) pwm_reg = LP3944_REG_PWM0; else if (dim == LP3944_DIM1) pwm_reg = LP3944_REG_PWM1; else return -EINVAL; /* Convert duty cycle to PWM value */ if (duty_cycle > LP3944_DUTY_CYCLE_MAX) return -EINVAL; pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX; err = lp3944_reg_write(client, pwm_reg, pwm_value); return err; } /** * Set the led status * * @led: a lp3944_led_data structure * @status: one of LP3944_LED_STATUS_OFF * LP3944_LED_STATUS_ON * LP3944_LED_STATUS_DIM0 * LP3944_LED_STATUS_DIM1 */ static int lp3944_led_set(struct lp3944_led_data *led, u8 status) { struct lp3944_data *data = i2c_get_clientdata(led->client); u8 id = led->id; u8 reg; u8 val = 0; int err; dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n", __func__, led->ldev.name, status); switch (id) { case LP3944_LED0: case LP3944_LED1: case LP3944_LED2: case LP3944_LED3: reg = LP3944_REG_LS0; break; case LP3944_LED4: case LP3944_LED5: case LP3944_LED6: case LP3944_LED7: id -= LP3944_LED4; reg = LP3944_REG_LS1; break; default: return -EINVAL; } if (status > LP3944_LED_STATUS_DIM1) return -EINVAL; /* invert only 0 and 1, leave unchanged the other values, * remember we are abusing status to set blink patterns */ if (led->type == 
LP3944_LED_TYPE_LED_INVERTED && status < 2) status = 1 - status; mutex_lock(&data->lock); lp3944_reg_read(led->client, reg, &val); val &= ~(LP3944_LED_STATUS_MASK << (id << 1)); val |= (status << (id << 1)); dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n", __func__, led->ldev.name, reg, id, status, val); /* set led status */ err = lp3944_reg_write(led->client, reg, val); mutex_unlock(&data->lock); return err; } static int lp3944_led_set_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct lp3944_led_data *led = ldev_to_led(led_cdev); u16 period; u8 duty_cycle; int err; /* units are in ms */ if (*delay_on + *delay_off > LP3944_PERIOD_MAX) return -EINVAL; if (*delay_on == 0 && *delay_off == 0) { /* Special case: the leds subsystem requires a default user * friendly blink pattern for the LED. Let's blink the led * slowly (1Hz). */ *delay_on = 500; *delay_off = 500; } period = (*delay_on) + (*delay_off); /* duty_cycle is the percentage of period during which the led is ON */ duty_cycle = 100 * (*delay_on) / period; /* invert duty cycle for inverted leds, this has the same effect of * swapping delay_on and delay_off */ if (led->type == LP3944_LED_TYPE_LED_INVERTED) duty_cycle = 100 - duty_cycle; /* NOTE: using always the first DIM mode, this means that all leds * will have the same blinking pattern. * * We could find a way later to have two leds blinking in hardware * with different patterns at the same time, falling back to software * control for the other ones. 
*/ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period); if (err) return err; err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle); if (err) return err; dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n", __func__); led->status = LP3944_LED_STATUS_DIM0; schedule_work(&led->work); return 0; } static void lp3944_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { struct lp3944_led_data *led = ldev_to_led(led_cdev); dev_dbg(&led->client->dev, "%s: %s, %d\n", __func__, led_cdev->name, brightness); led->status = brightness; schedule_work(&led->work); } static void lp3944_led_work(struct work_struct *work) { struct lp3944_led_data *led; led = container_of(work, struct lp3944_led_data, work); lp3944_led_set(led, led->status); } static int lp3944_configure(struct i2c_client *client, struct lp3944_data *data, struct lp3944_platform_data *pdata) { int i, err = 0; for (i = 0; i < pdata->leds_size; i++) { struct lp3944_led *pled = &pdata->leds[i]; struct lp3944_led_data *led = &data->leds[i]; led->client = client; led->id = i; switch (pled->type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led->type = pled->type; led->status = pled->status; led->ldev.name = pled->name; led->ldev.max_brightness = 1; led->ldev.brightness_set = lp3944_led_set_brightness; led->ldev.blink_set = lp3944_led_set_blink; led->ldev.flags = LED_CORE_SUSPENDRESUME; INIT_WORK(&led->work, lp3944_led_work); err = led_classdev_register(&client->dev, &led->ldev); if (err < 0) { dev_err(&client->dev, "couldn't register LED %s\n", led->ldev.name); goto exit; } /* to expose the default value to userspace */ led->ldev.brightness = led->status; /* Set the default led status */ err = lp3944_led_set(led, led->status); if (err < 0) { dev_err(&client->dev, "%s couldn't set STATUS %d\n", led->ldev.name, led->status); goto exit; } break; case LP3944_LED_TYPE_NONE: default: break; } } return 0; exit: if (i > 0) for (i = i - 1; i >= 
0; i--) switch (pdata->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } return err; } static int __devinit lp3944_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; struct lp3944_data *data; int err; if (lp3944_pdata == NULL) { dev_err(&client->dev, "no platform data\n"); return -EINVAL; } /* Let's see whether this adapter can support what we need. */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "insufficient functionality!\n"); return -ENODEV; } data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; i2c_set_clientdata(client, data); mutex_init(&data->lock); err = lp3944_configure(client, data, lp3944_pdata); if (err < 0) { kfree(data); return err; } dev_info(&client->dev, "lp3944 enabled\n"); return 0; } static int __devexit lp3944_remove(struct i2c_client *client) { struct lp3944_platform_data *pdata = client->dev.platform_data; struct lp3944_data *data = i2c_get_clientdata(client); int i; for (i = 0; i < pdata->leds_size; i++) switch (data->leds[i].type) { case LP3944_LED_TYPE_LED: case LP3944_LED_TYPE_LED_INVERTED: led_classdev_unregister(&data->leds[i].ldev); cancel_work_sync(&data->leds[i].work); break; case LP3944_LED_TYPE_NONE: default: break; } kfree(data); return 0; } /* lp3944 i2c driver struct */ static const struct i2c_device_id lp3944_id[] = { {"lp3944", 0}, {} }; MODULE_DEVICE_TABLE(i2c, lp3944_id); static struct i2c_driver lp3944_driver = { .driver = { .name = "lp3944", }, .probe = lp3944_probe, .remove = __devexit_p(lp3944_remove), .id_table = lp3944_id, }; static int __init lp3944_module_init(void) { return i2c_add_driver(&lp3944_driver); } static void __exit lp3944_module_exit(void) { 
i2c_del_driver(&lp3944_driver); } module_init(lp3944_module_init); module_exit(lp3944_module_exit); MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); MODULE_DESCRIPTION("LP3944 Fun Light Chip"); MODULE_LICENSE("GPL");
gpl-2.0
Jazz-823/kernel_lge_hammerhead_M
arch/s390/kernel/setup.c
4409
29587
/* * arch/s390/kernel/setup.c * * S390 version * Copyright (C) IBM Corp. 1999,2012 * Author(s): Hartmut Penner (hp@de.ibm.com), * Martin Schwidefsky (schwidefsky@de.ibm.com) * * Derived from "arch/i386/kernel/setup.c" * Copyright (C) 1995, Linus Torvalds */ /* * This file handles the architecture-dependent parts of initialization */ #define KMSG_COMPONENT "setup" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/tty.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/bootmem.h> #include <linux/root_dev.h> #include <linux/console.h> #include <linux/kernel_stat.h> #include <linux/device.h> #include <linux/notifier.h> #include <linux/pfn.h> #include <linux/ctype.h> #include <linux/reboot.h> #include <linux/topology.h> #include <linux/ftrace.h> #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/memory.h> #include <linux/compat.h> #include <asm/ipl.h> #include <asm/uaccess.h> #include <asm/facility.h> #include <asm/smp.h> #include <asm/mmu_context.h> #include <asm/cpcmd.h> #include <asm/lowcore.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/sections.h> #include <asm/ebcdic.h> #include <asm/kvm_virtio.h> #include <asm/diag.h> #include <asm/os_info.h> #include "entry.h" long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | PSW_MASK_EA | PSW_MASK_BA; long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_ASC_HOME; /* * User copy operations. */ struct uaccess_ops uaccess; EXPORT_SYMBOL(uaccess); /* * Machine setup.. 
*/ unsigned int console_mode = 0; EXPORT_SYMBOL(console_mode); unsigned int console_devno = -1; EXPORT_SYMBOL(console_devno); unsigned int console_irq = -1; EXPORT_SYMBOL(console_irq); unsigned long elf_hwcap = 0; char elf_platform[ELF_PLATFORM_SIZE]; struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; int __initdata memory_end_set; unsigned long __initdata memory_end; unsigned long VMALLOC_START; EXPORT_SYMBOL(VMALLOC_START); unsigned long VMALLOC_END; EXPORT_SYMBOL(VMALLOC_END); struct page *vmemmap; EXPORT_SYMBOL(vmemmap); /* An array with a pointer to the lowcore of every CPU. */ struct _lowcore *lowcore_ptr[NR_CPUS]; EXPORT_SYMBOL(lowcore_ptr); /* * This is set up by the setup-routine at boot-time * for S390 need to find out, what we have to setup * using address 0x10400 ... */ #include <asm/setup.h> /* * condev= and conmode= setup parameter. */ static int __init condev_setup(char *str) { int vdev; vdev = simple_strtoul(str, &str, 0); if (vdev >= 0 && vdev < 65536) { console_devno = vdev; console_irq = -1; } return 1; } __setup("condev=", condev_setup); static void __init set_preferred_console(void) { if (MACHINE_IS_KVM) add_preferred_console("hvc", 0, NULL); else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) add_preferred_console("ttyS", 0, NULL); else if (CONSOLE_IS_3270) add_preferred_console("tty3270", 0, NULL); } static int __init conmode_setup(char *str) { #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) SET_CONSOLE_SCLP; #endif #if defined(CONFIG_TN3215_CONSOLE) if (strncmp(str, "3215", 5) == 0) SET_CONSOLE_3215; #endif #if defined(CONFIG_TN3270_CONSOLE) if (strncmp(str, "3270", 5) == 0) SET_CONSOLE_3270; #endif set_preferred_console(); return 1; } __setup("conmode=", conmode_setup); static void __init conmode_default(void) { char query_buffer[1024]; char *ptr; if (MACHINE_IS_VM) { cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); console_devno = 
simple_strtoul(query_buffer + 5, NULL, 16); ptr = strstr(query_buffer, "SUBCHANNEL ="); console_irq = simple_strtoul(ptr + 13, NULL, 16); cpcmd("QUERY TERM", query_buffer, 1024, NULL); ptr = strstr(query_buffer, "CONMODE"); /* * Set the conmode to 3215 so that the device recognition * will set the cu_type of the console to 3215. If the * conmode is 3270 and we don't set it back then both * 3215 and the 3270 driver will try to access the console * device (3215 as console and 3270 as normal tty). */ cpcmd("TERM CONMODE 3215", NULL, 0, NULL); if (ptr == NULL) { #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif return; } if (strncmp(ptr + 8, "3270", 4) == 0) { #if defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; #elif defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } else if (strncmp(ptr + 8, "3215", 4) == 0) { #if defined(CONFIG_TN3215_CONSOLE) SET_CONSOLE_3215; #elif defined(CONFIG_TN3270_CONSOLE) SET_CONSOLE_3270; #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } } else { #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) SET_CONSOLE_SCLP; #endif } } #ifdef CONFIG_ZFCPDUMP static void __init setup_zfcpdump(unsigned int console_devno) { static char str[41]; if (ipl_info.type != IPL_TYPE_FCP_DUMP) return; if (OLDMEM_BASE) return; if (console_devno != -1) sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", ipl_info.data.fcp.dev_id.devno, console_devno); else sprintf(str, " cio_ignore=all,!0.0.%04x", ipl_info.data.fcp.dev_id.devno); strcat(boot_command_line, str); console_loglevel = 2; } #else static inline void setup_zfcpdump(unsigned int console_devno) {} #endif /* CONFIG_ZFCPDUMP */ /* * Reboot, halt and power_off stubs. They just call _machine_restart, * _machine_halt or _machine_power_off. 
*/ void machine_restart(char *command) { if ((!in_interrupt() && !in_atomic()) || oops_in_progress) /* * Only unblank the console if we are called in enabled * context or a bust_spinlocks cleared the way for us. */ console_unblank(); _machine_restart(command); } void machine_halt(void) { if (!in_interrupt() || oops_in_progress) /* * Only unblank the console if we are called in enabled * context or a bust_spinlocks cleared the way for us. */ console_unblank(); _machine_halt(); } void machine_power_off(void) { if (!in_interrupt() || oops_in_progress) /* * Only unblank the console if we are called in enabled * context or a bust_spinlocks cleared the way for us. */ console_unblank(); _machine_power_off(); } /* * Dummy power off function. */ void (*pm_power_off)(void) = machine_power_off; static int __init early_parse_mem(char *p) { memory_end = memparse(p, &p); memory_end_set = 1; return 0; } early_param("mem", early_parse_mem); static int __init parse_vmalloc(char *arg) { if (!arg) return -EINVAL; VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK; return 0; } early_param("vmalloc", parse_vmalloc); unsigned int user_mode = HOME_SPACE_MODE; EXPORT_SYMBOL_GPL(user_mode); static int set_amode_primary(void) { psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; #ifdef CONFIG_COMPAT psw32_user_bits = (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; #endif if (MACHINE_HAS_MVCOS) { memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); return 1; } else { memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); return 0; } } /* * Switch kernel/user addressing modes? 
*/ static int __init early_parse_switch_amode(char *p) { user_mode = PRIMARY_SPACE_MODE; return 0; } early_param("switch_amode", early_parse_switch_amode); static int __init early_parse_user_mode(char *p) { if (p && strcmp(p, "primary") == 0) user_mode = PRIMARY_SPACE_MODE; else if (!p || strcmp(p, "home") == 0) user_mode = HOME_SPACE_MODE; else return 1; return 0; } early_param("user_mode", early_parse_user_mode); static void setup_addressing_mode(void) { if (user_mode == PRIMARY_SPACE_MODE) { if (set_amode_primary()) pr_info("Address spaces switched, " "mvcos available\n"); else pr_info("Address spaces switched, " "mvcos not available\n"); } } void *restart_stack __attribute__((__section__(".data"))); static void __init setup_lowcore(void) { struct _lowcore *lc; /* * Setup lowcore for boot cpu */ BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096); lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); lc->restart_psw.mask = psw_kernel_bits; lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) restart_int_handler; lc->external_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; lc->external_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) ext_int_handler; lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; lc->program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; lc->program_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; lc->mcck_new_psw.mask = psw_kernel_bits; lc->mcck_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; lc->io_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; lc->clock_comparator = -1ULL; lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; lc->async_stack = (unsigned long) __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; 
lc->panic_stack = (unsigned long) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; lc->current_task = (unsigned long) init_thread_union.thread_info.task; lc->thread_info = (unsigned long) &init_thread_union; lc->machine_flags = S390_lowcore.machine_flags; lc->stfl_fac_list = S390_lowcore.stfl_fac_list; memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, MAX_FACILITY_BIT/8); #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lc->extended_save_area_addr = (__u32) __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0); /* enable extended save area */ __ctl_set_bit(14, 29); } #else lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif lc->sync_enter_timer = S390_lowcore.sync_enter_timer; lc->async_enter_timer = S390_lowcore.async_enter_timer; lc->exit_timer = S390_lowcore.exit_timer; lc->user_timer = S390_lowcore.user_timer; lc->system_timer = S390_lowcore.system_timer; lc->steal_timer = S390_lowcore.steal_timer; lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_clock = S390_lowcore.last_update_clock; lc->ftrace_func = S390_lowcore.ftrace_func; restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); restart_stack += ASYNC_SIZE; /* * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant * restart data to the absolute zero lowcore. This is necesary if * PSW restart is done on an offline CPU that has lowcore zero. 
*/ lc->restart_stack = (unsigned long) restart_stack; lc->restart_fn = (unsigned long) do_restart; lc->restart_data = 0; lc->restart_source = -1UL; memcpy(&S390_lowcore.restart_stack, &lc->restart_stack, 4*sizeof(unsigned long)); copy_to_absolute_zero(&S390_lowcore.restart_psw, &lc->restart_psw, sizeof(psw_t)); set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; } static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; static struct resource bss_resource = { .name = "Kernel bss", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; static struct resource __initdata *standard_resources[] = { &code_resource, &data_resource, &bss_resource, }; static void __init setup_resources(void) { struct resource *res, *std_res, *sub_res; int i, j; code_resource.start = (unsigned long) &_text; code_resource.end = (unsigned long) &_etext - 1; data_resource.start = (unsigned long) &_etext; data_resource.end = (unsigned long) &_edata - 1; bss_resource.start = (unsigned long) &__bss_start; bss_resource.end = (unsigned long) &__bss_stop - 1; for (i = 0; i < MEMORY_CHUNKS; i++) { if (!memory_chunk[i].size) continue; if (memory_chunk[i].type == CHUNK_OLDMEM || memory_chunk[i].type == CHUNK_CRASHK) continue; res = alloc_bootmem_low(sizeof(*res)); res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; switch (memory_chunk[i].type) { case CHUNK_READ_WRITE: case CHUNK_CRASHK: res->name = "System RAM"; break; case CHUNK_READ_ONLY: res->name = "System ROM"; res->flags |= IORESOURCE_READONLY; break; default: res->name = "reserved"; } res->start = memory_chunk[i].addr; res->end = res->start + memory_chunk[i].size - 1; request_resource(&iomem_resource, res); for (j = 0; j < ARRAY_SIZE(standard_resources); j++) { std_res = standard_resources[j]; if (std_res->start < res->start || std_res->start > res->end) continue; if (std_res->end > res->end) 
{ sub_res = alloc_bootmem_low(sizeof(*sub_res)); *sub_res = *std_res; sub_res->end = res->end; std_res->start = res->end + 1; request_resource(res, sub_res); } else { request_resource(res, std_res); } } } } unsigned long real_memory_size; EXPORT_SYMBOL_GPL(real_memory_size); static void __init setup_memory_end(void) { unsigned long vmax, vmalloc_size, tmp; int i; #ifdef CONFIG_ZFCPDUMP if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) { memory_end = ZFCPDUMP_HSA_SIZE; memory_end_set = 1; } #endif real_memory_size = 0; memory_end &= PAGE_MASK; /* * Make sure all chunks are MAX_ORDER aligned so we don't need the * extra checks that HOLES_IN_ZONE would require. */ for (i = 0; i < MEMORY_CHUNKS; i++) { unsigned long start, end; struct mem_chunk *chunk; unsigned long align; chunk = &memory_chunk[i]; align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); start = (chunk->addr + align - 1) & ~(align - 1); end = (chunk->addr + chunk->size) & ~(align - 1); if (start >= end) memset(chunk, 0, sizeof(*chunk)); else { chunk->addr = start; chunk->size = end - start; } real_memory_size = max(real_memory_size, chunk->addr + chunk->size); } /* Choose kernel address space layout: 2, 3, or 4 levels. */ #ifdef CONFIG_64BIT vmalloc_size = VMALLOC_END ?: 128UL << 30; tmp = (memory_end ?: real_memory_size) / PAGE_SIZE; tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; if (tmp <= (1UL << 42)) vmax = 1UL << 42; /* 3-level kernel page table */ else vmax = 1UL << 53; /* 4-level kernel page table */ #else vmalloc_size = VMALLOC_END ?: 96UL << 20; vmax = 1UL << 31; /* 2-level kernel page table */ #endif /* vmalloc area is at the end of the kernel address space. 
*/ VMALLOC_END = vmax; VMALLOC_START = vmax - vmalloc_size; /* Split remaining virtual space between 1:1 mapping & vmemmap array */ tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); tmp = VMALLOC_START - tmp * sizeof(struct page); tmp &= ~((vmax >> 11) - 1); /* align to page table level */ tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); vmemmap = (struct page *) tmp; /* Take care that memory_end is set and <= vmemmap */ memory_end = min(memory_end ?: real_memory_size, tmp); /* Fixup memory chunk array to fit into 0..memory_end */ for (i = 0; i < MEMORY_CHUNKS; i++) { struct mem_chunk *chunk = &memory_chunk[i]; if (chunk->addr >= memory_end) { memset(chunk, 0, sizeof(*chunk)); continue; } if (chunk->addr + chunk->size > memory_end) chunk->size = memory_end - chunk->addr; } } static void __init setup_vmcoreinfo(void) { #ifdef CONFIG_KEXEC unsigned long ptr = paddr_vmcoreinfo_note(); copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr)); #endif } #ifdef CONFIG_CRASH_DUMP /* * Find suitable location for crashkernel memory */ static unsigned long __init find_crash_base(unsigned long crash_size, char **msg) { unsigned long crash_base; struct mem_chunk *chunk; int i; if (memory_chunk[0].size < crash_size) { *msg = "first memory chunk must be at least crashkernel size"; return 0; } if (OLDMEM_BASE && crash_size == OLDMEM_SIZE) return OLDMEM_BASE; for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { chunk = &memory_chunk[i]; if (chunk->size == 0) continue; if (chunk->type != CHUNK_READ_WRITE) continue; if (chunk->size < crash_size) continue; crash_base = (chunk->addr + chunk->size) - crash_size; if (crash_base < crash_size) continue; if (crash_base < ZFCPDUMP_HSA_SIZE_MAX) continue; if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE) continue; return crash_base; } *msg = "no suitable area found"; return 0; } /* * Check if crash_base and crash_size is valid */ static int __init verify_crash_base(unsigned long crash_base, unsigned long crash_size, char **msg) { 
struct mem_chunk *chunk; int i; /* * Because we do the swap to zero, we must have at least 'crash_size' * bytes free space before crash_base */ if (crash_size > crash_base) { *msg = "crashkernel offset must be greater than size"; return -EINVAL; } /* First memory chunk must be at least crash_size */ if (memory_chunk[0].size < crash_size) { *msg = "first memory chunk must be at least crashkernel size"; return -EINVAL; } /* Check if we fit into the respective memory chunk */ for (i = 0; i < MEMORY_CHUNKS; i++) { chunk = &memory_chunk[i]; if (chunk->size == 0) continue; if (crash_base < chunk->addr) continue; if (crash_base >= chunk->addr + chunk->size) continue; /* we have found the memory chunk */ if (crash_base + crash_size > chunk->addr + chunk->size) { *msg = "selected memory chunk is too small for " "crashkernel memory"; return -EINVAL; } return 0; } *msg = "invalid memory range specified"; return -EINVAL; } /* * Reserve kdump memory by creating a memory hole in the mem_chunk array */ static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size, int type) { create_mem_hole(memory_chunk, addr, size, type); } /* * When kdump is enabled, we have to ensure that no memory from * the area [0 - crashkernel memory size] and * [crashk_res.start - crashk_res.end] is set offline. 
*/ static int kdump_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct memory_notify *arg = data; if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res))) return NOTIFY_BAD; if (arg->start_pfn > PFN_DOWN(crashk_res.end)) return NOTIFY_OK; if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) return NOTIFY_OK; return NOTIFY_BAD; } static struct notifier_block kdump_mem_nb = { .notifier_call = kdump_mem_notifier, }; #endif /* * Make sure that oldmem, where the dump is stored, is protected */ static void reserve_oldmem(void) { #ifdef CONFIG_CRASH_DUMP if (!OLDMEM_BASE) return; reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM); reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE, CHUNK_OLDMEM); if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size) saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; else saved_max_pfn = PFN_DOWN(real_memory_size) - 1; #endif } /* * Reserve memory for kdump kernel to be loaded with kexec */ static void __init reserve_crashkernel(void) { #ifdef CONFIG_CRASH_DUMP unsigned long long crash_base, crash_size; char *msg = NULL; int rc; rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, &crash_base); if (rc || crash_size == 0) return; crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN); crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN); if (register_memory_notifier(&kdump_mem_nb)) return; if (!crash_base) crash_base = find_crash_base(crash_size, &msg); if (!crash_base) { pr_info("crashkernel reservation failed: %s\n", msg); unregister_memory_notifier(&kdump_mem_nb); return; } if (verify_crash_base(crash_base, crash_size, &msg)) { pr_info("crashkernel reservation failed: %s\n", msg); unregister_memory_notifier(&kdump_mem_nb); return; } if (!OLDMEM_BASE && MACHINE_IS_VM) diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; insert_resource(&iomem_resource, &crashk_res); 
reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK); pr_info("Reserving %lluMB of memory at %lluMB " "for crashkernel (System RAM: %luMB)\n", crash_size >> 20, crash_base >> 20, memory_end >> 20); os_info_crashkernel_add(crash_base, crash_size); #endif } static void __init setup_memory(void) { unsigned long bootmap_size; unsigned long start_pfn, end_pfn; int i; /* * partially used pages are not usable - thus * we are rounding upwards: */ start_pfn = PFN_UP(__pa(&_end)); end_pfn = max_pfn = PFN_DOWN(memory_end); #ifdef CONFIG_BLK_DEV_INITRD /* * Move the initrd in case the bitmap of the bootmem allocater * would overwrite it. */ if (INITRD_START && INITRD_SIZE) { unsigned long bmap_size; unsigned long start; bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1); bmap_size = PFN_PHYS(bmap_size); if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) { /* Move initrd behind kdump oldmem */ if (start + INITRD_SIZE > OLDMEM_BASE && start < OLDMEM_BASE + OLDMEM_SIZE) start = OLDMEM_BASE + OLDMEM_SIZE; } #endif if (start + INITRD_SIZE > memory_end) { pr_err("initrd extends beyond end of " "memory (0x%08lx > 0x%08lx) " "disabling initrd\n", start + INITRD_SIZE, memory_end); INITRD_START = INITRD_SIZE = 0; } else { pr_info("Moving initrd (0x%08lx -> " "0x%08lx, size: %ld)\n", INITRD_START, start, INITRD_SIZE); memmove((void *) start, (void *) INITRD_START, INITRD_SIZE); INITRD_START = start; } } } #endif /* * Initialize the boot-time allocator */ bootmap_size = init_bootmem(start_pfn, end_pfn); /* * Register RAM areas with the bootmem allocator. 
*/ for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { unsigned long start_chunk, end_chunk, pfn; if (memory_chunk[i].type != CHUNK_READ_WRITE && memory_chunk[i].type != CHUNK_CRASHK) continue; start_chunk = PFN_DOWN(memory_chunk[i].addr); end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); end_chunk = min(end_chunk, end_pfn); if (start_chunk >= end_chunk) continue; memblock_add_node(PFN_PHYS(start_chunk), PFN_PHYS(end_chunk - start_chunk), 0); pfn = max(start_chunk, start_pfn); for (; pfn < end_chunk; pfn++) page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY, 0); } psw_set_key(PAGE_DEFAULT_KEY); free_bootmem_with_active_regions(0, max_pfn); /* * Reserve memory used for lowcore/command line/kernel image. */ reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT); reserve_bootmem((unsigned long)_stext, PFN_PHYS(start_pfn) - (unsigned long)_stext, BOOTMEM_DEFAULT); /* * Reserve the bootmem bitmap itself as well. We do this in two * steps (first step was init_bootmem()) because this catches * the (very unlikely) case of us accidentally initializing the * bootmem allocator with an invalid RAM area. */ reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size, BOOTMEM_DEFAULT); #ifdef CONFIG_CRASH_DUMP if (crashk_res.start) reserve_bootmem(crashk_res.start, crashk_res.end - crashk_res.start + 1, BOOTMEM_DEFAULT); if (is_kdump_kernel()) reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE, PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT); #endif #ifdef CONFIG_BLK_DEV_INITRD if (INITRD_START && INITRD_SIZE) { if (INITRD_START + INITRD_SIZE <= memory_end) { reserve_bootmem(INITRD_START, INITRD_SIZE, BOOTMEM_DEFAULT); initrd_start = INITRD_START; initrd_end = initrd_start + INITRD_SIZE; } else { pr_err("initrd extends beyond end of " "memory (0x%08lx > 0x%08lx) " "disabling initrd\n", initrd_start + INITRD_SIZE, memory_end); initrd_start = initrd_end = 0; } } #endif } /* * Setup hardware capabilities. 
*/ static void __init setup_hwcaps(void) { static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; struct cpuid cpu_id; int i; /* * The store facility list bits numbers as found in the principles * of operation are numbered with bit 1UL<<31 as number 0 to * bit 1UL<<0 as number 31. * Bit 0: instructions named N3, "backported" to esa-mode * Bit 2: z/Architecture mode is active * Bit 7: the store-facility-list-extended facility is installed * Bit 17: the message-security assist is installed * Bit 19: the long-displacement facility is installed * Bit 21: the extended-immediate facility is installed * Bit 22: extended-translation facility 3 is installed * Bit 30: extended-translation facility 3 enhancement facility * These get translated to: * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and * HWCAP_S390_ETF3EH bit 8 (22 && 30). */ for (i = 0; i < 6; i++) if (test_facility(stfl_bits[i])) elf_hwcap |= 1UL << i; if (test_facility(22) && test_facility(30)) elf_hwcap |= HWCAP_S390_ETF3EH; /* * Check for additional facilities with store-facility-list-extended. * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information * as stored by stfl, bits 32-xxx contain additional facilities. * How many facility words are stored depends on the number of * doublewords passed to the instruction. The additional facilities * are: * Bit 42: decimal floating point facility is installed * Bit 44: perform floating point operation facility is installed * translated to: * HWCAP_S390_DFP bit 6 (42 && 44). */ if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44)) elf_hwcap |= HWCAP_S390_DFP; /* * Huge page support HWCAP_S390_HPAGE is bit 7. */ if (MACHINE_HAS_HPAGE) elf_hwcap |= HWCAP_S390_HPAGE; /* * 64-bit register support for 31-bit processes * HWCAP_S390_HIGH_GPRS is bit 9. 
*/ elf_hwcap |= HWCAP_S390_HIGH_GPRS; get_cpu_id(&cpu_id); switch (cpu_id.machine) { case 0x9672: #if !defined(CONFIG_64BIT) default: /* Use "g5" as default for 31 bit kernels. */ #endif strcpy(elf_platform, "g5"); break; case 0x2064: case 0x2066: #if defined(CONFIG_64BIT) default: /* Use "z900" as default for 64 bit kernels. */ #endif strcpy(elf_platform, "z900"); break; case 0x2084: case 0x2086: strcpy(elf_platform, "z990"); break; case 0x2094: case 0x2096: strcpy(elf_platform, "z9-109"); break; case 0x2097: case 0x2098: strcpy(elf_platform, "z10"); break; case 0x2817: case 0x2818: strcpy(elf_platform, "z196"); break; } } /* * Setup function called from init/main.c just after the banner * was printed. */ void __init setup_arch(char **cmdline_p) { /* * print what head.S has found out about the machine */ #ifndef CONFIG_64BIT if (MACHINE_IS_VM) pr_info("Linux is running as a z/VM " "guest operating system in 31-bit mode\n"); else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 31-bit mode\n"); if (MACHINE_HAS_IEEE) pr_info("The hardware system has IEEE compatible " "floating point units\n"); else pr_info("The hardware system has no IEEE compatible " "floating point units\n"); #else /* CONFIG_64BIT */ if (MACHINE_IS_VM) pr_info("Linux is running as a z/VM " "guest operating system in 64-bit mode\n"); else if (MACHINE_IS_KVM) pr_info("Linux is running under KVM in 64-bit mode\n"); else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 64-bit mode\n"); #endif /* CONFIG_64BIT */ /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ *cmdline_p = boot_command_line; ROOT_DEV = Root_RAM0; init_mm.start_code = PAGE_OFFSET; init_mm.end_code = (unsigned long) &_etext; init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; if (MACHINE_HAS_MVCOS) memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); else memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); 
parse_early_param(); os_info_init(); setup_ipl(); setup_memory_end(); setup_addressing_mode(); reserve_oldmem(); reserve_crashkernel(); setup_memory(); setup_resources(); setup_vmcoreinfo(); setup_lowcore(); cpu_init(); s390_init_cpu_topology(); /* * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). */ setup_hwcaps(); /* * Create kernel page tables and switch to virtual addressing. */ paging_init(); /* Setup default console */ conmode_default(); set_preferred_console(); /* Setup zfcpdump support */ setup_zfcpdump(console_devno); }
gpl-2.0
danielhk/android_kernel_samsung_smdk4210
arch/um/kernel/um_arch.c
4665
9061
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/delay.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/utsname.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/setup.h> #include "as-layout.h" #include "arch.h" #include "init.h" #include "kern.h" #include "kern_util.h" #include "mem_user.h" #include "os.h" #define DEFAULT_COMMAND_LINE "root=98:0" /* Changed in add_arg and setup_arch, which run before SMP is started */ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 }; static void __init add_arg(char *arg) { if (strlen(command_line) + strlen(arg) + 1 > COMMAND_LINE_SIZE) { printf("add_arg: Too many command line arguments!\n"); exit(1); } if (strlen(command_line) > 0) strcat(command_line, " "); strcat(command_line, arg); } /* * These fields are initialized at boot time and not changed. * XXX This structure is used only in the non-SMP case. Maybe this * should be moved to smp.c. 
*/ struct cpuinfo_um boot_cpu_data = { .loops_per_jiffy = 0, .ipi_pipe = { -1, -1 } }; unsigned long thread_saved_pc(struct task_struct *task) { /* FIXME: Need to look up userspace_pid by cpu */ return os_process_pc(userspace_pid[0]); } /* Changed in setup_arch, which is called in early boot */ static char host_info[(__NEW_UTS_LEN + 1) * 5]; static int show_cpuinfo(struct seq_file *m, void *v) { int index = 0; #ifdef CONFIG_SMP index = (struct cpuinfo_um *) v - cpu_data; if (!cpu_online(index)) return 0; #endif seq_printf(m, "processor\t: %d\n", index); seq_printf(m, "vendor_id\t: User Mode Linux\n"); seq_printf(m, "model name\t: UML\n"); seq_printf(m, "mode\t\t: skas\n"); seq_printf(m, "host\t\t: %s\n", host_info); seq_printf(m, "bogomips\t: %lu.%02lu\n\n", loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < NR_CPUS ? cpu_data + *pos : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo, }; /* Set in linux_main */ unsigned long uml_physmem; unsigned long uml_reserved; /* Also modified in mem_init */ unsigned long start_vm; unsigned long end_vm; /* Set in uml_ncpus_setup */ int ncpus = 1; /* Set in early boot */ static int have_root __initdata = 0; /* Set in uml_mem_setup and modified in linux_main */ long long physmem_size = 32 * 1024 * 1024; static const char *usage_string = "User Mode Linux v%s\n" " available at http://user-mode-linux.sourceforge.net/\n\n"; static int __init uml_version_setup(char *line, int *add) { printf("%s\n", init_utsname()->release); exit(0); return 0; } __uml_setup("--version", uml_version_setup, "--version\n" " Prints the version number of the kernel.\n\n" ); static int __init uml_root_setup(char *line, int *add) { have_root = 
1; return 0; } __uml_setup("root=", uml_root_setup, "root=<file containing the root fs>\n" " This is actually used by the generic kernel in exactly the same\n" " way as in any other kernel. If you configure a number of block\n" " devices and want to boot off something other than ubd0, you \n" " would use something like:\n" " root=/dev/ubd5\n\n" ); static int __init no_skas_debug_setup(char *line, int *add) { printf("'debug' is not necessary to gdb UML in skas mode - run \n"); printf("'gdb linux'\n"); return 0; } __uml_setup("debug", no_skas_debug_setup, "debug\n" " this flag is not needed to run gdb on UML in skas mode\n\n" ); #ifdef CONFIG_SMP static int __init uml_ncpus_setup(char *line, int *add) { if (!sscanf(line, "%d", &ncpus)) { printf("Couldn't parse [%s]\n", line); return -1; } return 0; } __uml_setup("ncpus=", uml_ncpus_setup, "ncpus=<# of desired CPUs>\n" " This tells an SMP kernel how many virtual processors to start.\n\n" ); #endif static int __init Usage(char *line, int *add) { const char **p; printf(usage_string, init_utsname()->release); p = &__uml_help_start; while (p < &__uml_help_end) { printf("%s", *p); p++; } exit(0); return 0; } __uml_setup("--help", Usage, "--help\n" " Prints this message.\n\n" ); static void __init uml_checksetup(char *line, int *add) { struct uml_param *p; p = &__uml_setup_start; while (p < &__uml_setup_end) { size_t n; n = strlen(p->str); if (!strncmp(line, p->str, n) && p->setup_func(line + n, add)) return; p++; } } static void __init uml_postsetup(void) { initcall_t *p; p = &__uml_postsetup_start; while (p < &__uml_postsetup_end) { (*p)(); p++; } return; } static int panic_exit(struct notifier_block *self, unsigned long unused1, void *unused2) { bust_spinlocks(1); show_regs(&(current->thread.regs)); bust_spinlocks(0); uml_exitcode = 1; os_dump_core(); return 0; } static struct notifier_block panic_exit_notifier = { .notifier_call = panic_exit, .next = NULL, .priority = 0 }; /* Set during early boot */ unsigned long 
task_size; EXPORT_SYMBOL(task_size); unsigned long host_task_size; unsigned long brk_start; unsigned long end_iomem; EXPORT_SYMBOL(end_iomem); #define MIN_VMALLOC (32 * 1024 * 1024) extern char __binary_start; int __init linux_main(int argc, char **argv) { unsigned long avail, diff; unsigned long virtmem_size, max_physmem; unsigned long stack; unsigned int i; int add; char * mode; for (i = 1; i < argc; i++) { if ((i == 1) && (argv[i][0] == ' ')) continue; add = 1; uml_checksetup(argv[i], &add); if (add) add_arg(argv[i]); } if (have_root == 0) add_arg(DEFAULT_COMMAND_LINE); host_task_size = os_get_top_address(); /* * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps * out */ task_size = host_task_size & PGDIR_MASK; /* OS sanity checks that need to happen before the kernel runs */ os_early_checks(); can_do_skas(); if (proc_mm && ptrace_faultinfo) mode = "SKAS3"; else mode = "SKAS0"; printf("UML running in %s mode\n", mode); brk_start = (unsigned long) sbrk(0); /* * Increase physical memory size for exec-shield users * so they actually get what they asked for. 
This should * add zero for non-exec shield users */ diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end); if (diff > 1024 * 1024) { printf("Adding %ld bytes to physical memory to account for " "exec-shield gap\n", diff); physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end); } uml_physmem = (unsigned long) &__binary_start & PAGE_MASK; /* Reserve up to 4M after the current brk */ uml_reserved = ROUND_4M(brk_start) + (1 << 22); setup_machinename(init_utsname()->machine); highmem = 0; iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK; max_physmem = TASK_SIZE - uml_physmem - iomem_size - MIN_VMALLOC; /* * Zones have to begin on a 1 << MAX_ORDER page boundary, * so this makes sure that's true for highmem */ max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1); if (physmem_size + iomem_size > max_physmem) { highmem = physmem_size + iomem_size - max_physmem; physmem_size -= highmem; #ifndef CONFIG_HIGHMEM highmem = 0; printf("CONFIG_HIGHMEM not enabled - physical memory shrunk " "to %Lu bytes\n", physmem_size); #endif } high_physmem = uml_physmem + physmem_size; end_iomem = high_physmem + iomem_size; high_memory = (void *) end_iomem; start_vm = VMALLOC_START; setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem); if (init_maps(physmem_size, iomem_size, highmem)) { printf("Failed to allocate mem_map for %Lu bytes of physical " "memory and %Lu bytes of highmem\n", physmem_size, highmem); exit(1); } virtmem_size = physmem_size; stack = (unsigned long) argv; stack &= ~(1024 * 1024 - 1); avail = stack - start_vm; if (physmem_size > avail) virtmem_size = avail; end_vm = start_vm + virtmem_size; if (virtmem_size < physmem_size) printf("Kernel virtual memory size shrunk to %lu bytes\n", virtmem_size); atomic_notifier_chain_register(&panic_notifier_list, &panic_exit_notifier); uml_postsetup(); stack_protections((unsigned long) &init_thread_info); os_flush_stdout(); return start_uml(); } void __init setup_arch(char **cmdline_p) { paging_init(); 
strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; setup_hostinfo(host_info, sizeof host_info); } void __init check_bugs(void) { arch_check_bugs(); os_check_bugs(); } void apply_alternatives(struct alt_instr *start, struct alt_instr *end) { } #ifdef CONFIG_SMP void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) { } void alternatives_smp_module_del(struct module *mod) { } #endif
gpl-2.0
TWRP-THEA/android_kernel_motorola_msm8226
arch/x86/kernel/apm_32.c
4665
70983
/* -*- linux-c -*- * APM BIOS driver for Linux * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au) * * Initial development of this driver was funded by NEC Australia P/L * and NEC Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * October 1995, Rik Faith (faith@cs.unc.edu): * Minor enhancements and updates (to the patch set) for 1.3.x * Documentation * January 1996, Rik Faith (faith@cs.unc.edu): * Make /proc/apm easy to format (bump driver version) * March 1996, Rik Faith (faith@cs.unc.edu): * Prohibit APM BIOS calls unless apm_enabled. * (Thanks to Ulrich Windl <Ulrich.Windl@rz.uni-regensburg.de>) * April 1996, Stephen Rothwell (sfr@canb.auug.org.au) * Version 1.0 and 1.1 * May 1996, Version 1.2 * Feb 1998, Version 1.3 * Feb 1998, Version 1.4 * Aug 1998, Version 1.5 * Sep 1998, Version 1.6 * Nov 1998, Version 1.7 * Jan 1999, Version 1.8 * Jan 1999, Version 1.9 * Oct 1999, Version 1.10 * Nov 1999, Version 1.11 * Jan 2000, Version 1.12 * Feb 2000, Version 1.13 * Nov 2000, Version 1.14 * Oct 2001, Version 1.15 * Jan 2002, Version 1.16 * Oct 2002, Version 1.16ac * * History: * 0.6b: first version in official kernel, Linux 1.3.46 * 0.7: changed /proc/apm format, Linux 1.3.58 * 0.8: fixed gcc 2.7.[12] compilation problems, Linux 1.3.59 * 0.9: only call bios if bios is present, Linux 1.3.72 * 1.0: use fixed device number, consolidate /proc/apm into this file, * Linux 1.3.85 * 1.1: support user-space standby and suspend, power off after system * halted, Linux 1.3.98 * 1.2: When resetting RTC after resume, take care so that the 
time * is only incorrect by 30-60mS (vs. 1S previously) (Gabor J. Toth * <jtoth@princeton.edu>); improve interaction between * screen-blanking and gpm (Stephen Rothwell); Linux 1.99.4 * 1.2a:Simple change to stop mysterious bug reports with SMP also added * levels to the printk calls. APM is not defined for SMP machines. * The new replacement for it is, but Linux doesn't yet support this. * Alan Cox Linux 2.1.55 * 1.3: Set up a valid data descriptor 0x40 for buggy BIOS's * 1.4: Upgraded to support APM 1.2. Integrated ThinkPad suspend patch by * Dean Gaudet <dgaudet@arctic.org>. * C. Scott Ananian <cananian@alumni.princeton.edu> Linux 2.1.87 * 1.5: Fix segment register reloading (in case of bad segments saved * across BIOS call). * Stephen Rothwell * 1.6: Cope with compiler/assembler differences. * Only try to turn off the first display device. * Fix OOPS at power off with no APM BIOS by Jan Echternach * <echter@informatik.uni-rostock.de> * Stephen Rothwell * 1.7: Modify driver's cached copy of the disabled/disengaged flags * to reflect current state of APM BIOS. * Chris Rankin <rankinc@bellsouth.net> * Reset interrupt 0 timer to 100Hz after suspend * Chad Miller <cmiller@surfsouth.com> * Add CONFIG_APM_IGNORE_SUSPEND_BOUNCE * Richard Gooch <rgooch@atnf.csiro.au> * Allow boot time disabling of APM * Make boot messages far less verbose by default * Make asm safer * Stephen Rothwell * 1.8: Add CONFIG_APM_RTC_IS_GMT * Richard Gooch <rgooch@atnf.csiro.au> * change APM_NOINTS to CONFIG_APM_ALLOW_INTS * remove dependency on CONFIG_PROC_FS * Stephen Rothwell * 1.9: Fix small typo. <laslo@wodip.opole.pl> * Try to cope with BIOS's that need to have all display * devices blanked and not just the first one. * Ross Paterson <ross@soi.city.ac.uk> * Fix segment limit setting it has always been wrong as * the segments needed to have byte granularity. * Mark a few things __init. * Add hack to allow power off of SMP systems by popular request. 
* Use CONFIG_SMP instead of __SMP__ * Ignore BOUNCES for three seconds. * Stephen Rothwell * 1.10: Fix for Thinkpad return code. * Merge 2.2 and 2.3 drivers. * Remove APM dependencies in arch/i386/kernel/process.c * Remove APM dependencies in drivers/char/sysrq.c * Reset time across standby. * Allow more inititialisation on SMP. * Remove CONFIG_APM_POWER_OFF and make it boot time * configurable (default on). * Make debug only a boot time parameter (remove APM_DEBUG). * Try to blank all devices on any error. * 1.11: Remove APM dependencies in drivers/char/console.c * Check nr_running to detect if we are idle (from * Borislav Deianov <borislav@lix.polytechnique.fr>) * Fix for bioses that don't zero the top part of the * entrypoint offset (Mario Sitta <sitta@al.unipmn.it>) * (reported by Panos Katsaloulis <teras@writeme.com>). * Real mode power off patch (Walter Hofmann * <Walter.Hofmann@physik.stud.uni-erlangen.de>). * 1.12: Remove CONFIG_SMP as the compiler will optimize * the code away anyway (smp_num_cpus == 1 in UP) * noted by Artur Skawina <skawina@geocities.com>. * Make power off under SMP work again. * Fix thinko with initial engaging of BIOS. * Make sure power off only happens on CPU 0 * (Paul "Rusty" Russell <rusty@rustcorp.com.au>). * Do error notification to user mode if BIOS calls fail. * Move entrypoint offset fix to ...boot/setup.S * where it belongs (Cosmos <gis88564@cis.nctu.edu.tw>). * Remove smp-power-off. SMP users must now specify * "apm=power-off" on the kernel command line. Suggested * by Jim Avera <jima@hal.com>, modified by Alan Cox * <alan@lxorguk.ukuu.org.uk>. * Register the /proc/apm entry even on SMP so that * scripts that check for it before doing power off * work (Jim Avera <jima@hal.com>). * 1.13: Changes for new pm_ interfaces (Andy Henroid * <andy_henroid@yahoo.com>). * Modularize the code. * Fix the Thinkpad (again) :-( (CONFIG_APM_IGNORE_MULTIPLE_SUSPENDS * is now the way life works). * Fix thinko in suspend() (wrong return). 
* Notify drivers on critical suspend. * Make kapmd absorb more idle time (Pavel Machek <pavel@ucw.cz> * modified by sfr). * Disable interrupts while we are suspended (Andy Henroid * <andy_henroid@yahoo.com> fixed by sfr). * Make power off work on SMP again (Tony Hoyle * <tmh@magenta-logic.com> and <zlatko@iskon.hr>) modified by sfr. * Remove CONFIG_APM_SUSPEND_BOUNCE. The bounce ignore * interval is now configurable. * 1.14: Make connection version persist across module unload/load. * Enable and engage power management earlier. * Disengage power management on module unload. * Changed to use the sysrq-register hack for registering the * power off function called by magic sysrq based upon discussions * in irc://irc.openprojects.net/#kernelnewbies * (Crutcher Dunnavant <crutcher+kernel@datastacks.com>). * Make CONFIG_APM_REAL_MODE_POWER_OFF run time configurable. * (Arjan van de Ven <arjanv@redhat.com>) modified by sfr. * Work around byte swap bug in one of the Vaio's BIOS's * (Marc Boucher <marc@mbsi.ca>). * Exposed the disable flag to dmi so that we can handle known * broken APM (Alan Cox <alan@lxorguk.ukuu.org.uk>). * 1.14ac: If the BIOS says "I slowed the CPU down" then don't spin * calling it - instead idle. (Alan Cox <alan@lxorguk.ukuu.org.uk>) * If an APM idle fails log it and idle sensibly * 1.15: Don't queue events to clients who open the device O_WRONLY. * Don't expect replies from clients who open the device O_RDONLY. * (Idea from Thomas Hood) * Minor waitqueue cleanups. (John Fremlin <chief@bandits.org>) * 1.16: Fix idle calling. (Andreas Steinmetz <ast@domdv.de> et al.) * Notify listeners of standby or suspend events before notifying * drivers. Return EBUSY to ioctl() if suspend is rejected. * (Russell King <rmk@arm.linux.org.uk> and Thomas Hood) * Ignore first resume after we generate our own resume event * after a suspend (Thomas Hood) * Daemonize now gets rid of our controlling terminal (sfr). 
* CONFIG_APM_CPU_IDLE now just affects the default value of * idle_threshold (sfr). * Change name of kernel apm daemon (as it no longer idles) (sfr). * 1.16ac: Fix up SMP support somewhat. You can now force SMP on and we * make _all_ APM calls on the CPU#0. Fix unsafe sign bug. * TODO: determine if its "boot CPU" or "CPU0" we want to lock to. * * APM 1.1 Reference: * * Intel Corporation, Microsoft Corporation. Advanced Power Management * (APM) BIOS Interface Specification, Revision 1.1, September 1993. * Intel Order Number 241704-001. Microsoft Part Number 781-110-X01. * * [This document is available free from Intel by calling 800.628.8686 (fax * 916.356.6100) or 800.548.4725; or from * http://www.microsoft.com/whdc/archive/amp_12.mspx It is also * available from Microsoft by calling 206.882.8080.] * * APM 1.2 Reference: * Intel Corporation, Microsoft Corporation. Advanced Power Management * (APM) BIOS Interface Specification, Revision 1.2, February 1996. * * [This document is available from Microsoft at: * http://www.microsoft.com/whdc/archive/amp_12.mspx] */ #include <linux/module.h> #include <linux/poll.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/timer.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/apm_bios.h> #include <linux/init.h> #include <linux/time.h> #include <linux/sched.h> #include <linux/pm.h> #include <linux/capability.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/freezer.h> #include <linux/smp.h> #include <linux/dmi.h> #include <linux/suspend.h> #include <linux/kthread.h> #include <linux/jiffies.h> #include <linux/acpi.h> #include <linux/syscore_ops.h> #include <linux/i8253.h> #include <asm/uaccess.h> #include <asm/desc.h> #include <asm/olpc.h> #include <asm/paravirt.h> #include <asm/reboot.h> #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) extern int 
(*console_blank_hook)(int); #endif /* * The apm_bios device is one of the misc char devices. * This is its minor number. */ #define APM_MINOR_DEV 134 /* * Various options can be changed at boot time as follows: * (We allow underscores for compatibility with the modules code) * apm=on/off enable/disable APM * [no-]allow[-_]ints allow interrupts during BIOS calls * [no-]broken[-_]psr BIOS has a broken GetPowerStatus call * [no-]realmode[-_]power[-_]off switch to real mode before * powering off * [no-]debug log some debugging messages * [no-]power[-_]off power off on shutdown * [no-]smp Use apm even on an SMP box * bounce[-_]interval=<n> number of ticks to ignore suspend * bounces * idle[-_]threshold=<n> System idle percentage above which to * make APM BIOS idle calls. Set it to * 100 to disable. * idle[-_]period=<n> Period (in 1/100s of a second) over * which the idle percentage is * calculated. */ /* KNOWN PROBLEM MACHINES: * * U: TI 4000M TravelMate: BIOS is *NOT* APM compliant * [Confirmed by TI representative] * ?: ACER 486DX4/75: uses dseg 0040, in violation of APM specification * [Confirmed by BIOS disassembly] * [This may work now ...] * P: Toshiba 1950S: battery life information only gets updated after resume * P: Midwest Micro Soundbook Elite DX2/66 monochrome: screen blanking * broken in BIOS [Reported by Garst R. Reese <reese@isn.net>] * ?: AcerNote-950: oops on reading /proc/apm - workaround is a WIP * Neale Banks <neale@lowendale.com.au> December 2000 * * Legend: U = unusable with APM patches * P = partially usable with APM patches */ /* * Define as 1 to make the driver always call the APM BIOS busy * routine even if the clock was not reported as slowed by the * idle routine. Otherwise, define as 0. */ #define ALWAYS_CALL_BUSY 1 /* * Define to make the APM BIOS calls zero all data segment registers (so * that an incorrect BIOS implementation will cause a kernel panic if it * tries to write to arbitrary memory). 
*/ #define APM_ZERO_SEGS #include <asm/apm.h> /* * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. * This patched by Chad Miller <cmiller@surfsouth.com>, original code by * David Chen <chen@ctpa04.mit.edu> */ #undef INIT_TIMER_AFTER_SUSPEND #ifdef INIT_TIMER_AFTER_SUSPEND #include <linux/timex.h> #include <asm/io.h> #include <linux/delay.h> #endif /* * Need to poll the APM BIOS every second */ #define APM_CHECK_TIMEOUT (HZ) /* * Ignore suspend events for this amount of time after a resume */ #define DEFAULT_BOUNCE_INTERVAL (3 * HZ) /* * Maximum number of events stored */ #define APM_MAX_EVENTS 20 /* * The per-file APM data */ struct apm_user { int magic; struct apm_user *next; unsigned int suser: 1; unsigned int writer: 1; unsigned int reader: 1; unsigned int suspend_wait: 1; int suspend_result; int suspends_pending; int standbys_pending; int suspends_read; int standbys_read; int event_head; int event_tail; apm_event_t events[APM_MAX_EVENTS]; }; /* * The magic number in apm_user */ #define APM_BIOS_MAGIC 0x4101 /* * idle percentage above which bios idle calls are done */ #ifdef CONFIG_APM_CPU_IDLE #warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012 #define DEFAULT_IDLE_THRESHOLD 95 #else #define DEFAULT_IDLE_THRESHOLD 100 #endif #define DEFAULT_IDLE_PERIOD (100 / 3) /* * Local variables */ static struct { unsigned long offset; unsigned short segment; } apm_bios_entry; static int clock_slowed; static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD; static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD; static int set_pm_idle; static int suspends_pending; static int standbys_pending; static int ignore_sys_suspend; static int ignore_normal_resume; static int bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL; static bool debug __read_mostly; static bool smp __read_mostly; static int apm_disabled = -1; #ifdef CONFIG_SMP static bool power_off; #else static bool power_off = 1; #endif static bool realmode_power_off; 
#ifdef CONFIG_APM_ALLOW_INTS
static bool allow_ints = 1;
#else
static bool allow_ints;
#endif
static bool broken_psr;

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
static struct apm_user *user_list;		/* protected by user_list_lock */
static DEFINE_SPINLOCK(user_list_lock);
static DEFINE_MUTEX(apm_mutex);

/*
 * Set up a segment that references the real mode segment 0x40
 * that extends up to the end of page zero (that we have reserved).
 * This is for buggy BIOS's that refer to (real mode) segment 0x40
 * even though they are called in protected mode.
 */
static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);

static const char driver_version[] = "1.16ac";	/* no spaces */

static struct task_struct *kapmd_task;

/*
 * APM event names taken from the APM 1.2 specification. These are
 * the message codes that the BIOS uses to tell us about events
 */
static const char * const apm_event_name[] = {
	"system standby",
	"system suspend",
	"normal resume",
	"critical resume",
	"low battery",
	"power status change",
	"update time",
	"critical suspend",
	"user standby",
	"user suspend",
	"system standby resume",
	"capabilities change"
};
#define NR_APM_EVENT_NAME ARRAY_SIZE(apm_event_name)

typedef struct lookup_t {
	int	key;
	char	*msg;
} lookup_t;

/*
 * The BIOS returns a set of standard error codes in AX when the
 * carry flag is set.
 */
static const lookup_t error_table[] = {
/* N/A	{ APM_SUCCESS,		"Operation succeeded" }, */
	{ APM_DISABLED,		"Power management disabled" },
	{ APM_CONNECTED,	"Real mode interface already connected" },
	{ APM_NOT_CONNECTED,	"Interface not connected" },
	{ APM_16_CONNECTED,	"16 bit interface already connected" },
/* N/A	{ APM_16_UNSUPPORTED,	"16 bit interface not supported" }, */
	{ APM_32_CONNECTED,	"32 bit interface already connected" },
	{ APM_32_UNSUPPORTED,	"32 bit interface not supported" },
	{ APM_BAD_DEVICE,	"Unrecognized device ID" },
	{ APM_BAD_PARAM,	"Parameter out of range" },
	{ APM_NOT_ENGAGED,	"Interface not engaged" },
	{ APM_BAD_FUNCTION,	"Function not supported" },
	{ APM_RESUME_DISABLED,	"Resume timer disabled" },
	{ APM_BAD_STATE,	"Unable to enter requested state" },
/* N/A	{ APM_NO_EVENTS,	"No events pending" }, */
	{ APM_NO_ERROR,		"BIOS did not set a return code" },
	{ APM_NOT_PRESENT,	"No APM present" }
};
#define ERROR_COUNT	ARRAY_SIZE(error_table)

/**
 *	apm_error	-	display an APM error
 *	@str: information string
 *	@err: APM BIOS return code
 *
 *	Write a meaningful log entry to the kernel log in the event of
 *	an APM error.  Note that this also handles (negative) kernel errors.
 */
static void apm_error(char *str, int err)
{
	int i;

	/* Look the code up in the table of known BIOS error codes */
	for (i = 0; i < ERROR_COUNT; i++)
		if (error_table[i].key == err)
			break;
	if (i < ERROR_COUNT)
		printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
	else if (err < 0)
		printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err);
	else
		printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
		       str, err);
}

/*
 * These are the actual BIOS calls.  Depending on APM_ZERO_SEGS and
 * apm_info.allow_ints, we are being really paranoid here!
 Not only
 * are interrupts disabled, but all the segment registers (except SS)
 * are saved and zeroed this means that if the BIOS tries to reference
 * any data without explicitly loading the segment registers, the kernel
 * will fault immediately rather than have some unforeseen circumstances
 * for the rest of the kernel.  And it will be very obvious!  :-) Doing
 * this depends on CS referring to the same physical memory as DS so that
 * DS can be zeroed before the call. Unfortunately, we can't do anything
 * about the stack segment/pointer.  Also, we tell the compiler that
 * everything could change.
 *
 * Also, we KNOW that for the non error case of apm_bios_call, there
 * is no useful data returned in the low order 8 bits of eax.
 */

/*
 * Prepare the IRQ-flags state for a BIOS call: interrupts are enabled
 * only when apm_info.allow_ints says the BIOS can cope with them,
 * otherwise they are hard-disabled.  Returns the previous flags so
 * apm_irq_restore() can put things back.
 */
static inline unsigned long __apm_irq_save(void)
{
	unsigned long flags;
	local_save_flags(flags);
	if (apm_info.allow_ints) {
		if (irqs_disabled_flags(flags))
			local_irq_enable();
	} else
		local_irq_disable();
	return flags;
}

#define apm_irq_save(flags) \
	do { flags = __apm_irq_save(); } while (0)

/* Restore the IRQ state captured by apm_irq_save() */
static inline void apm_irq_restore(unsigned long flags)
{
	if (irqs_disabled_flags(flags))
		local_irq_disable();
	else if (irqs_disabled())
		local_irq_enable();
}

#ifdef APM_ZERO_SEGS
#	define APM_DECL_SEGS \
		unsigned int saved_fs; unsigned int saved_gs;
#	define APM_DO_SAVE_SEGS \
		savesegment(fs, saved_fs); savesegment(gs, saved_gs)
#	define APM_DO_RESTORE_SEGS \
		loadsegment(fs, saved_fs); loadsegment(gs, saved_gs)
#else
#	define APM_DECL_SEGS
#	define APM_DO_SAVE_SEGS
#	define APM_DO_RESTORE_SEGS
#endif

/* Register block passed to/from the 32-bit BIOS entry points */
struct apm_bios_call {
	u32 func;
	/* In and out */
	u32 ebx;
	u32 ecx;
	/* Out only */
	u32 eax;
	u32 edx;
	u32 esi;

	/* Error: -ENOMEM, or bits 8-15 of eax */
	int err;
};

/**
 *	__apm_bios_call - Make an APM BIOS 32bit call
 *	@_call: pointer to struct apm_bios_call.
 *
 *	Make an APM call using the 32bit protected mode interface. The
 *	caller is responsible for knowing if APM BIOS is configured and
 *	enabled. This call can disable interrupts for a long period of
 *	time on some laptops.  The return value is in AH and the carry
 *	flag is loaded into AL.  If there is an error, then the error
 *	code is returned in AH (bits 8-15 of eax) and this function
 *	returns non-zero.
 *
 *	Note: this makes the call on the current CPU.
 */
static long __apm_bios_call(void *_call)
{
	APM_DECL_SEGS
	unsigned long		flags;
	int			cpu;
	struct desc_struct	save_desc_40;
	struct desc_struct	*gdt;
	struct apm_bios_call	*call = _call;

	cpu = get_cpu();
	BUG_ON(cpu != 0);
	gdt = get_cpu_gdt_table(cpu);
	/* Temporarily install the 0x40 workaround descriptor (see
	 * bad_bios_desc) for BIOSes that reference real-mode seg 0x40 */
	save_desc_40 = gdt[0x40 / 8];
	gdt[0x40 / 8] = bad_bios_desc;

	apm_irq_save(flags);
	APM_DO_SAVE_SEGS;
	apm_bios_call_asm(call->func, call->ebx, call->ecx,
			  &call->eax, &call->ebx, &call->ecx, &call->edx,
			  &call->esi);
	APM_DO_RESTORE_SEGS;
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();

	return call->eax & 0xff;
}

/* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */
static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call)
{
	int ret;

	/* Don't bother with work_on_cpu in the common case, so we don't
	 * have to worry about OOM or overhead. */
	if (get_cpu() == 0) {
		ret = fn(call);
		put_cpu();
	} else {
		put_cpu();
		ret = work_on_cpu(0, fn, call);
	}

	/* work_on_cpu can fail with -ENOMEM */
	if (ret < 0)
		call->err = ret;
	else
		call->err = (call->eax >> 8) & 0xff;
	return ret;
}

/**
 *	apm_bios_call	-	Make an APM BIOS 32bit call (on CPU 0)
 *	@call: the apm_bios_call registers.
 *
 *	If there is an error, it is returned in @call.err.
 */
static int apm_bios_call(struct apm_bios_call *call)
{
	return on_cpu0(__apm_bios_call, call);
}

/**
 *	__apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0)
 *	@_call: pointer to struct apm_bios_call.
 *
 *	Make a BIOS call that returns one value only, or just status.
 *	If there is an error, then the error code is returned in AH
 *	(bits 8-15 of eax) and this function returns non-zero (it can
 *	also return -ENOMEM). This is used for simpler BIOS operations.
 *	This call may hold interrupts off for a long time on some laptops.
 *
 *	Note: this makes the call on the current CPU.
 */
static long __apm_bios_call_simple(void *_call)
{
	u8			error;
	APM_DECL_SEGS
	unsigned long		flags;
	int			cpu;
	struct desc_struct	save_desc_40;
	struct desc_struct	*gdt;
	struct apm_bios_call	*call = _call;

	cpu = get_cpu();
	BUG_ON(cpu != 0);
	gdt = get_cpu_gdt_table(cpu);
	/* Same 0x40-descriptor workaround as __apm_bios_call() */
	save_desc_40 = gdt[0x40 / 8];
	gdt[0x40 / 8] = bad_bios_desc;

	apm_irq_save(flags);
	APM_DO_SAVE_SEGS;
	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
					 &call->eax);
	APM_DO_RESTORE_SEGS;
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();
	return error;
}

/**
 *	apm_bios_call_simple - make a simple APM BIOS 32bit call
 *	@func: APM function to invoke
 *	@ebx_in: EBX register value for BIOS call
 *	@ecx_in: ECX register value for BIOS call
 *	@eax: EAX register on return from the BIOS call
 *	@err: bits
 *
 *	Make a BIOS call that returns one value only, or just status.
 *	If there is an error, then the error code is returned in @err
 *	and this function returns non-zero. This is used for simpler
 *	BIOS operations.  This call may hold interrupts off for a long
 *	time on some laptops.
 */
static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax,
				int *err)
{
	struct apm_bios_call call;
	int ret;

	call.func = func;
	call.ebx = ebx_in;
	call.ecx = ecx_in;

	ret = on_cpu0(__apm_bios_call_simple, &call);
	*eax = call.eax;
	*err = call.err;
	return ret;
}

/**
 *	apm_driver_version	-	APM driver version
 *	@val:	loaded with the APM version on return
 *
 *	Retrieve the APM version supported by the BIOS. This is only
 *	supported for APM 1.1 or higher. An error indicates APM 1.0 is
 *	probably present.
 *
 *	On entry val should point to a value indicating the APM driver
 *	version with the high byte being the major and the low byte the
 *	minor number both in BCD
 *
 *	On return it will hold the BIOS revision supported in the
 *	same format.
 */
static int apm_driver_version(u_short *val)
{
	u32 eax;
	int err;

	if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err))
		return err;
	*val = eax;
	return APM_SUCCESS;
}

/**
 *	apm_get_event	-	get an APM event from the BIOS
 *	@event: pointer to the event
 *	@info: point to the event information
 *
 *	The APM BIOS provides a polled information for event
 *	reporting. The BIOS expects to be polled at least every second
 *	when events are pending. When a message is found the caller should
 *	poll until no more messages are present.  However, this causes
 *	problems on some laptops where a suspend event notification is
 *	not cleared until it is acknowledged.
 *
 *	Additional information is returned in the info pointer, providing
 *	that APM 1.2 is in use. If no messges are pending the value 0x80
 *	is returned (No power management events pending).
 */
static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
{
	struct apm_bios_call call;

	call.func = APM_FUNC_GET_EVENT;
	call.ebx = call.ecx = 0;

	if (apm_bios_call(&call))
		return call.err;

	*event = call.ebx;
	if (apm_info.connection_version < 0x0102)
		*info = ~0; /* indicate info not valid */
	else
		*info = call.ecx;
	return APM_SUCCESS;
}

/**
 *	set_power_state	-	set the power management state
 *	@what: which items to transition
 *	@state: state to transition to
 *
 *	Request an APM change of state for one or more system devices. The
 *	processor state must be transitioned last of all. what holds the
 *	class of device in the upper byte and the device number (0xFF for
 *	all) for the object to be transitioned.
 *
 *	The state holds the state to transition to, which may in fact
 *	be an acceptance of a BIOS requested state change.
 */
static int set_power_state(u_short what, u_short state)
{
	u32 eax;
	int err;

	if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err))
		return err;
	return APM_SUCCESS;
}

/**
 *	set_system_power_state - set system wide power state
 *	@state: which state to enter
 *
 *	Transition the entire system into a new APM power state.
 */
static int set_system_power_state(u_short state)
{
	return set_power_state(APM_DEVICE_ALL, state);
}

/**
 *	apm_do_idle	-	perform power saving
 *
 *	This function notifies the BIOS that the processor is (in the view
 *	of the OS) idle. It returns -1 in the event that the BIOS refuses
 *	to handle the idle request. On a success the function returns 1
 *	if the BIOS did clock slowing or 0 otherwise.
 */
static int apm_do_idle(void)
{
	u32 eax;
	u8 ret = 0;
	int idled = 0;
	int polling;
	int err = 0;

	polling = !!(current_thread_info()->status & TS_POLLING);
	if (polling) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
	}
	if (!need_resched()) {
		idled = 1;
		ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err);
	}
	if (polling)
		current_thread_info()->status |= TS_POLLING;

	if (!idled)
		return 0;

	if (ret) {
		static unsigned long t;

		/* This always fails on some SMP boards running UP kernels.
		 * Only report the failure the first 5 times.
		 */
		if (++t < 5) {
			printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err);
			t = jiffies;
		}
		return -1;
	}
	clock_slowed = (apm_info.bios.flags & APM_IDLE_SLOWS_CLOCK) != 0;
	return clock_slowed;
}

/**
 *	apm_do_busy	-	inform the BIOS the CPU is busy
 *
 *	Request that the BIOS brings the CPU back to full performance.
 */
static void apm_do_busy(void)
{
	u32 dummy;
	int err;

	if (clock_slowed || ALWAYS_CALL_BUSY) {
		(void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err);
		clock_slowed = 0;
	}
}

/*
 * If no process has really been interested in
 * the CPU for some time, we want to call BIOS
 * power management - we probably want
 * to conserve power.
 */
#define IDLE_CALC_LIMIT	(HZ * 100)
#define IDLE_LEAKY_MAX	16

static void (*original_pm_idle)(void) __read_mostly;

/**
 * apm_cpu_idle		-	cpu idling for APM capable Linux
 *
 * This is the idling function the kernel executes when APM is available. It
 * tries to do BIOS powermanagement based on the average system idle time.
 * Furthermore it calls the system default idle routine.
 */
static void apm_cpu_idle(void)
{
	static int use_apm_idle; /* = 0 */
	static unsigned int last_jiffies; /* = 0 */
	static unsigned int last_stime; /* = 0 */

	int apm_idle_done = 0;
	unsigned int jiffies_since_last_check = jiffies - last_jiffies;
	unsigned int bucket;

	WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
recalc:
	/* Periodically recompute whether the recent idle percentage
	 * justifies making BIOS idle calls at all. */
	if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
		use_apm_idle = 0;
		last_jiffies = jiffies;
		last_stime = current->stime;
	} else if (jiffies_since_last_check > idle_period) {
		unsigned int idle_percentage;

		idle_percentage = current->stime - last_stime;
		idle_percentage *= 100;
		idle_percentage /= jiffies_since_last_check;
		use_apm_idle = (idle_percentage > idle_threshold);
		if (apm_info.forbid_idle)
			use_apm_idle = 0;
		last_jiffies = jiffies;
		last_stime = current->stime;
	}

	/* Leaky bucket: tolerate a few BIOS idle calls that return
	 * "too fast" before falling back to the default idle loop. */
	bucket = IDLE_LEAKY_MAX;

	while (!need_resched()) {
		if (use_apm_idle) {
			unsigned int t;

			t = jiffies;
			switch (apm_do_idle()) {
			case 0:
				apm_idle_done = 1;
				if (t != jiffies) {
					if (bucket) {
						bucket = IDLE_LEAKY_MAX;
						continue;
					}
				} else if (bucket) {
					bucket--;
					continue;
				}
				break;
			case 1:
				apm_idle_done = 1;
				break;
			default: /* BIOS refused */
				break;
			}
		}
		if (original_pm_idle)
			original_pm_idle();
		else
			default_idle();
		local_irq_disable();
		jiffies_since_last_check = jiffies - last_jiffies;
		if (jiffies_since_last_check > idle_period)
			goto recalc;
	}

	if (apm_idle_done)
		apm_do_busy();

	local_irq_enable();
}

/**
 *	apm_power_off	-	ask the BIOS to power off
 *
 *	Handle the power off sequence. This is the one piece of code we
 *	will execute even on SMP machines. In order to deal with BIOS
 *	bugs we support real mode APM BIOS power off calls. We also make
 *	the SMP call on CPU0 as some systems will only honour this call
 *	on their first cpu.
 */
static void apm_power_off(void)
{
	/* Some bioses don't like being called from CPU != 0 */
	if (apm_info.realmode_power_off) {
		set_cpus_allowed_ptr(current, cpumask_of(0));
		machine_real_restart(MRR_APM);
	} else {
		(void)set_system_power_state(APM_STATE_OFF);
	}
}

#ifdef CONFIG_APM_DO_ENABLE

/**
 *	apm_enable_power_management - enable BIOS APM power management
 *	@enable: enable yes/no
 *
 *	Enable or disable the APM BIOS power services.
 */
static int apm_enable_power_management(int enable)
{
	u32 eax;
	int err;

	if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED))
		return APM_NOT_ENGAGED;
	if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL,
				 enable, &eax, &err))
		return err;
	if (enable)
		apm_info.bios.flags &= ~APM_BIOS_DISABLED;
	else
		apm_info.bios.flags |= APM_BIOS_DISABLED;
	return APM_SUCCESS;
}
#endif

/**
 *	apm_get_power_status	-	get current power state
 *	@status: returned status
 *	@bat: battery info
 *	@life: estimated life
 *
 *	Obtain the current power status from the APM BIOS. We return a
 *	status which gives the rough battery status, and current power
 *	source. The bat value returned give an estimate as a percentage
 *	of life and a status value for the battery. The estimated life
 *	if reported is a lifetime in seconds/minutes at current power
 *	consumption.
 */
static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
{
	struct apm_bios_call call;

	call.func = APM_FUNC_GET_STATUS;
	call.ebx = APM_DEVICE_ALL;
	call.ecx = 0;

	if (apm_info.get_power_status_broken)
		return APM_32_UNSUPPORTED;
	if (apm_bios_call(&call))
		return call.err;
	*status = call.ebx;
	*bat = call.ecx;
	if (apm_info.get_power_status_swabinminutes) {
		/* Quirk: BIOS reports byte-swapped minutes; fix it up and
		 * set the "minutes" flag bit ourselves. */
		*life = swab16((u16)call.edx);
		*life |= 0x8000;
	} else
		*life = call.edx;
	return APM_SUCCESS;
}

#if 0
static int apm_get_battery_status(u_short which, u_short *status,
				  u_short *bat, u_short *life, u_short *nbat)
{
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u32 esi;

	if (apm_info.connection_version < 0x0102) {
		/* pretend we only have one battery. */
		if (which != 1)
			return APM_BAD_DEVICE;
		*nbat = 1;
		return apm_get_power_status(status, bat, life);
	}

	if (apm_bios_call(APM_FUNC_GET_STATUS, (0x8000 | (which)), 0, &eax,
			  &ebx, &ecx, &edx, &esi))
		return (eax >> 8) & 0xff;
	*status = ebx;
	*bat = ecx;
	*life = edx;
	*nbat = esi;
	return APM_SUCCESS;
}
#endif

/**
 *	apm_engage_power_management	-	enable PM on a device
 *	@device: identity of device
 *	@enable: on/off
 *
 *	Activate or deactive power management on either a specific device
 *	or the entire system (%APM_DEVICE_ALL).
 */
static int apm_engage_power_management(u_short device, int enable)
{
	u32 eax;
	int err;

	if ((enable == 0) && (device == APM_DEVICE_ALL)
	    && (apm_info.bios.flags & APM_BIOS_DISABLED))
		return APM_DISABLED;
	if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable,
				 &eax, &err))
		return err;
	if (device == APM_DEVICE_ALL) {
		if (enable)
			apm_info.bios.flags &= ~APM_BIOS_DISENGAGED;
		else
			apm_info.bios.flags |= APM_BIOS_DISENGAGED;
	}
	return APM_SUCCESS;
}

#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)

/**
 *	apm_console_blank	-	blank the display
 *	@blank: on/off
 *
 *	Attempt to blank the console, firstly by blanking just video device
 *	zero, and if that fails (some BIOSes don't support it) then it blanks
 *	all video devices.
 Typically the BIOS will do laptop backlight and
 *	monitor powerdown for us.
 */
static int apm_console_blank(int blank)
{
	int error = APM_NOT_ENGAGED; /* silence gcc */
	int i;
	u_short state;
	static const u_short dev[3] = { 0x100, 0x1FF, 0x101 };

	state = blank ? APM_STATE_STANDBY : APM_STATE_READY;

	for (i = 0; i < ARRAY_SIZE(dev); i++) {
		error = set_power_state(dev[i], state);

		if ((error == APM_SUCCESS) || (error == APM_NO_ERROR))
			return 1;

		if (error == APM_NOT_ENGAGED)
			break;
	}

	if (error == APM_NOT_ENGAGED) {
		static int tried;
		int eng_error;
		/* Try engaging the interface once, then retry the blank */
		if (tried++ == 0) {
			eng_error = apm_engage_power_management(APM_DEVICE_ALL, 1);
			if (eng_error) {
				apm_error("set display", error);
				apm_error("engage interface", eng_error);
				return 0;
			} else
				return apm_console_blank(blank);
		}
	}
	apm_error("set display", error);
	return 0;
}
#endif

/* True when the per-reader event ring buffer holds no events */
static int queue_empty(struct apm_user *as)
{
	return as->event_head == as->event_tail;
}

/* Pop the oldest event from the reader's ring buffer */
static apm_event_t get_queued_event(struct apm_user *as)
{
	if (++as->event_tail >= APM_MAX_EVENTS)
		as->event_tail = 0;
	return as->events[as->event_tail];
}

/*
 * Broadcast an event to every reader except @sender, dropping the
 * oldest entry when a reader's ring overflows.  Suspend/standby
 * events from privileged writers also bump the pending counters.
 */
static void queue_event(apm_event_t event, struct apm_user *sender)
{
	struct apm_user *as;

	spin_lock(&user_list_lock);
	if (user_list == NULL)
		goto out;
	for (as = user_list; as != NULL; as = as->next) {
		if ((as == sender) || (!as->reader))
			continue;
		if (++as->event_head >= APM_MAX_EVENTS)
			as->event_head = 0;

		if (as->event_head == as->event_tail) {
			static int notified;

			/* Ring full: report once, overwrite oldest event */
			if (notified++ == 0)
				printk(KERN_ERR "apm: an event queue overflowed\n");
			if (++as->event_tail >= APM_MAX_EVENTS)
				as->event_tail = 0;
		}
		as->events[as->event_head] = event;
		if (!as->suser || !as->writer)
			continue;
		switch (event) {
		case APM_SYS_SUSPEND:
		case APM_USER_SUSPEND:
			as->suspends_pending++;
			suspends_pending++;
			break;

		case APM_SYS_STANDBY:
		case APM_USER_STANDBY:
			as->standbys_pending++;
			standbys_pending++;
			break;
		}
	}
	wake_up_interruptible(&apm_waitqueue);
out:
	spin_unlock(&user_list_lock);
}

/* Reprogram the i8253 PIT back to HZ after resume (compile-time option) */
static void reinit_timer(void)
{
#ifdef INIT_TIMER_AFTER_SUSPEND
	unsigned long flags;

	raw_spin_lock_irqsave(&i8253_lock, flags);
	/* set the clock to HZ */
	outb_p(0x34, PIT_MODE);		/* binary, mode 2, LSB/MSB, ch 0 */
	udelay(10);
	outb_p(LATCH & 0xff, PIT_CH0);	/* LSB */
	udelay(10);
	outb_p(LATCH >> 8, PIT_CH0);	/* MSB */
	udelay(10);
	raw_spin_unlock_irqrestore(&i8253_lock, flags);
#endif
}

/*
 * Carry out a full APM suspend and the subsequent resume, then wake
 * every reader blocked in the APM_IOC_SUSPEND ioctl with the result.
 * Returns 0 on success or -EIO.  NOTE(review): @vetoable is accepted
 * but not consulted in this body — presumably handled by callers.
 */
static int suspend(int vetoable)
{
	int err;
	struct apm_user	*as;

	dpm_suspend_start(PMSG_SUSPEND);
	dpm_suspend_end(PMSG_SUSPEND);

	local_irq_disable();
	syscore_suspend();

	local_irq_enable();

	save_processor_state();
	err = set_system_power_state(APM_STATE_SUSPEND);
	ignore_normal_resume = 1;
	restore_processor_state();

	local_irq_disable();
	reinit_timer();

	if (err == APM_NO_ERROR)
		err = APM_SUCCESS;
	if (err != APM_SUCCESS)
		apm_error("suspend", err);
	err = (err == APM_SUCCESS) ? 0 : -EIO;

	syscore_resume();
	local_irq_enable();

	dpm_resume_start(PMSG_RESUME);
	dpm_resume_end(PMSG_RESUME);
	queue_event(APM_NORMAL_RESUME, NULL);
	spin_lock(&user_list_lock);
	for (as = user_list; as != NULL; as = as->next) {
		as->suspend_wait = 0;
		as->suspend_result = err;
	}
	spin_unlock(&user_list_lock);
	wake_up_interruptible(&apm_suspend_waitqueue);
	return err;
}

/* Enter APM standby; lighter-weight than suspend (no processor state save) */
static void standby(void)
{
	int err;

	dpm_suspend_end(PMSG_SUSPEND);

	local_irq_disable();
	syscore_suspend();
	local_irq_enable();

	err = set_system_power_state(APM_STATE_STANDBY);
	if ((err != APM_SUCCESS) && (err != APM_NO_ERROR))
		apm_error("standby", err);

	local_irq_disable();
	syscore_resume();
	local_irq_enable();

	dpm_resume_start(PMSG_RESUME);
}

/* Poll the BIOS for the next pending event; 0 means none */
static apm_event_t get_event(void)
{
	int error;
	apm_event_t event = APM_NO_EVENTS; /* silence gcc */
	apm_eventinfo_t info;

	static int notified;

	/* we don't use the eventinfo */
	error = apm_get_event(&event, &info);
	if (error == APM_SUCCESS)
		return event;

	if ((error != APM_NO_EVENTS) && (notified++ == 0))
		apm_error("get_event", error);

	return 0;
}

/*
 * Drain and dispatch all pending BIOS events: queue them to readers
 * and perform the standby/suspend/resume state machine.
 */
static void check_events(void)
{
	apm_event_t event;
	static unsigned long last_resume;
	static int ignore_bounce;

	while ((event = get_event()) != 0) {
		if (debug) {
			if (event <= NR_APM_EVENT_NAME)
				printk(KERN_DEBUG "apm: received %s notify\n",
				       apm_event_name[event - 1]);
			else
				printk(KERN_DEBUG "apm: received unknown "
				       "event 0x%02x\n", event);
		}
		if (ignore_bounce
		    && (time_after(jiffies, last_resume + bounce_interval)))
			ignore_bounce = 0;

		switch (event) {
		case APM_SYS_STANDBY:
		case APM_USER_STANDBY:
			queue_event(event, NULL);
			if (standbys_pending <= 0)
				standby();
			break;

		case APM_USER_SUSPEND:
#ifdef CONFIG_APM_IGNORE_USER_SUSPEND
			if (apm_info.connection_version > 0x100)
				set_system_power_state(APM_STATE_REJECT);
			break;
#endif
		/* fall through to system suspend unless configured to ignore */
		case APM_SYS_SUSPEND:
			if (ignore_bounce) {
				if (apm_info.connection_version > 0x100)
					set_system_power_state(APM_STATE_REJECT);
				break;
			}
			/*
			 * If we are already processing a SUSPEND,
			 * then further SUSPEND events from the BIOS
			 * will be ignored.  We also return here to
			 * cope with the fact that the Thinkpads keep
			 * sending a SUSPEND event until something else
			 * happens!
			 */
			if (ignore_sys_suspend)
				return;
			ignore_sys_suspend = 1;
			queue_event(event, NULL);
			if (suspends_pending <= 0)
				(void) suspend(1);
			break;

		case APM_NORMAL_RESUME:
		case APM_CRITICAL_RESUME:
		case APM_STANDBY_RESUME:
			ignore_sys_suspend = 0;
			last_resume = jiffies;
			ignore_bounce = 1;
			if ((event != APM_NORMAL_RESUME)
			    || (ignore_normal_resume == 0)) {
				dpm_resume_end(PMSG_RESUME);
				queue_event(event, NULL);
			}
			ignore_normal_resume = 0;
			break;

		case APM_CAPABILITY_CHANGE:
		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			queue_event(event, NULL);
			/* If needed, notify drivers here */
			break;

		case APM_UPDATE_TIME:
			break;

		case APM_CRITICAL_SUSPEND:
			/*
			 * We are not allowed to reject a critical suspend.
			 */
			(void)suspend(0);
			break;
		}
	}
}

/*
 * Periodic work done from the kapmd thread: keep the BIOS informed
 * that we are still busy while user requests are pending, then poll
 * for and dispatch events.
 */
static void apm_event_handler(void)
{
	static int pending_count = 4;
	int err;

	if ((standbys_pending > 0) || (suspends_pending > 0)) {
		if ((apm_info.connection_version > 0x100) &&
		    (pending_count-- <= 0)) {
			pending_count = 4;
			if (debug)
				printk(KERN_DEBUG "apm: setting state busy\n");
			err = set_system_power_state(APM_STATE_BUSY);
			if (err)
				apm_error("busy", err);
		}
	} else
		pending_count = 4;
	check_events();
}

/*
 * This is the APM thread main loop.
 */
static void apm_mainloop(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&apm_waitqueue, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	for (;;) {
		schedule_timeout(APM_CHECK_TIMEOUT);
		if (kthread_should_stop())
			break;
		/*
		 * Ok, check all events, check for idle (and mark us sleeping
		 * so as not to count towards the load average)..
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		apm_event_handler();
	}
	remove_wait_queue(&apm_waitqueue, &wait);
}

/* Validate the per-open magic; returns non-zero (and logs) on a bad filp */
static int check_apm_user(struct apm_user *as, const char *func)
{
	if (as == NULL || as->magic != APM_BIOS_MAGIC) {
		printk(KERN_ERR "apm: %s passed bad filp\n", func);
		return 1;
	}
	return 0;
}

/*
 * read() on /dev/apm_bios: block until events are queued (unless
 * O_NONBLOCK), then copy out as many whole apm_event_t records as fit.
 */
static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as;
	int i;
	apm_event_t event;

	as = fp->private_data;
	if (check_apm_user(as, "read"))
		return -EIO;
	/* NOTE(review): the (int) cast makes very large counts pass this
	 * check; harmless here since the loop below bounds the copies. */
	if ((int)count < sizeof(apm_event_t))
		return -EINVAL;
	if ((queue_empty(as)) && (fp->f_flags & O_NONBLOCK))
		return -EAGAIN;
	wait_event_interruptible(apm_waitqueue, !queue_empty(as));
	i = count;
	while ((i >= sizeof(event)) && !queue_empty(as)) {
		event = get_queued_event(as);
		if (copy_to_user(buf, &event, sizeof(event))) {
			if (i < count)
				break;
			return -EFAULT;
		}
		switch (event) {
		case APM_SYS_SUSPEND:
		case APM_USER_SUSPEND:
			as->suspends_read++;
			break;

		case APM_SYS_STANDBY:
		case APM_USER_STANDBY:
			as->standbys_read++;
			break;
		}
		buf += sizeof(event);
		i -= sizeof(event);
	}
	if (i < count)
		return count - i;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

static
unsigned int do_poll(struct file *fp, poll_table *wait) { struct apm_user *as; as = fp->private_data; if (check_apm_user(as, "poll")) return 0; poll_wait(fp, &apm_waitqueue, wait); if (!queue_empty(as)) return POLLIN | POLLRDNORM; return 0; } static long do_ioctl(struct file *filp, u_int cmd, u_long arg) { struct apm_user *as; int ret; as = filp->private_data; if (check_apm_user(as, "ioctl")) return -EIO; if (!as->suser || !as->writer) return -EPERM; switch (cmd) { case APM_IOC_STANDBY: mutex_lock(&apm_mutex); if (as->standbys_read > 0) { as->standbys_read--; as->standbys_pending--; standbys_pending--; } else queue_event(APM_USER_STANDBY, as); if (standbys_pending <= 0) standby(); mutex_unlock(&apm_mutex); break; case APM_IOC_SUSPEND: mutex_lock(&apm_mutex); if (as->suspends_read > 0) { as->suspends_read--; as->suspends_pending--; suspends_pending--; } else queue_event(APM_USER_SUSPEND, as); if (suspends_pending <= 0) { ret = suspend(1); mutex_unlock(&apm_mutex); } else { as->suspend_wait = 1; mutex_unlock(&apm_mutex); wait_event_interruptible(apm_suspend_waitqueue, as->suspend_wait == 0); ret = as->suspend_result; } return ret; default: return -ENOTTY; } return 0; } static int do_release(struct inode *inode, struct file *filp) { struct apm_user *as; as = filp->private_data; if (check_apm_user(as, "release")) return 0; filp->private_data = NULL; if (as->standbys_pending > 0) { standbys_pending -= as->standbys_pending; if (standbys_pending <= 0) standby(); } if (as->suspends_pending > 0) { suspends_pending -= as->suspends_pending; if (suspends_pending <= 0) (void) suspend(1); } spin_lock(&user_list_lock); if (user_list == as) user_list = as->next; else { struct apm_user *as1; for (as1 = user_list; (as1 != NULL) && (as1->next != as); as1 = as1->next) ; if (as1 == NULL) printk(KERN_ERR "apm: filp not in user list\n"); else as1->next = as->next; } spin_unlock(&user_list_lock); kfree(as); return 0; } static int do_open(struct inode *inode, struct file *filp) { struct 
apm_user *as; as = kmalloc(sizeof(*as), GFP_KERNEL); if (as == NULL) { printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n", sizeof(*as)); return -ENOMEM; } as->magic = APM_BIOS_MAGIC; as->event_tail = as->event_head = 0; as->suspends_pending = as->standbys_pending = 0; as->suspends_read = as->standbys_read = 0; /* * XXX - this is a tiny bit broken, when we consider BSD * process accounting. If the device is opened by root, we * instantly flag that we used superuser privs. Who knows, * we might close the device immediately without doing a * privileged operation -- cevans */ as->suser = capable(CAP_SYS_ADMIN); as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE; as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ; spin_lock(&user_list_lock); as->next = user_list; user_list = as; spin_unlock(&user_list_lock); filp->private_data = as; return 0; } static int proc_apm_show(struct seq_file *m, void *v) { unsigned short bx; unsigned short cx; unsigned short dx; int error; unsigned short ac_line_status = 0xff; unsigned short battery_status = 0xff; unsigned short battery_flag = 0xff; int percentage = -1; int time_units = -1; char *units = "?"; if ((num_online_cpus() == 1) && !(error = apm_get_power_status(&bx, &cx, &dx))) { ac_line_status = (bx >> 8) & 0xff; battery_status = bx & 0xff; if ((cx & 0xff) != 0xff) percentage = cx & 0xff; if (apm_info.connection_version > 0x100) { battery_flag = (cx >> 8) & 0xff; if (dx != 0xffff) { units = (dx & 0x8000) ? "min" : "sec"; time_units = dx & 0x7fff; } } } /* Arguments, with symbols from linux/apm_bios.h. Information is from the Get Power Status (0x0a) call unless otherwise noted. 0) Linux driver version (this will change if format changes) 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2. 
2) APM flags from APM Installation Check (0x00): bit 0: APM_16_BIT_SUPPORT bit 1: APM_32_BIT_SUPPORT bit 2: APM_IDLE_SLOWS_CLOCK bit 3: APM_BIOS_DISABLED bit 4: APM_BIOS_DISENGAGED 3) AC line status 0x00: Off-line 0x01: On-line 0x02: On backup power (BIOS >= 1.1 only) 0xff: Unknown 4) Battery status 0x00: High 0x01: Low 0x02: Critical 0x03: Charging 0x04: Selected battery not present (BIOS >= 1.2 only) 0xff: Unknown 5) Battery flag bit 0: High bit 1: Low bit 2: Critical bit 3: Charging bit 7: No system battery 0xff: Unknown 6) Remaining battery life (percentage of charge): 0-100: valid -1: Unknown 7) Remaining battery life (time units): Number of remaining minutes or seconds -1: Unknown 8) min = minutes; sec = seconds */ seq_printf(m, "%s %d.%d 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", driver_version, (apm_info.bios.version >> 8) & 0xff, apm_info.bios.version & 0xff, apm_info.bios.flags, ac_line_status, battery_status, battery_flag, percentage, time_units, units); return 0; } static int proc_apm_open(struct inode *inode, struct file *file) { return single_open(file, proc_apm_show, NULL); } static const struct file_operations apm_file_ops = { .owner = THIS_MODULE, .open = proc_apm_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int apm(void *unused) { unsigned short bx; unsigned short cx; unsigned short dx; int error; char *power_stat; char *bat_stat; /* 2002/08/01 - WT * This is to avoid random crashes at boot time during initialization * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D. * Some bioses don't like being called from CPU != 0. * Method suggested by Ingo Molnar. 
*/ set_cpus_allowed_ptr(current, cpumask_of(0)); BUG_ON(smp_processor_id() != 0); if (apm_info.connection_version == 0) { apm_info.connection_version = apm_info.bios.version; if (apm_info.connection_version > 0x100) { /* * We only support BIOSs up to version 1.2 */ if (apm_info.connection_version > 0x0102) apm_info.connection_version = 0x0102; error = apm_driver_version(&apm_info.connection_version); if (error != APM_SUCCESS) { apm_error("driver version", error); /* Fall back to an APM 1.0 connection. */ apm_info.connection_version = 0x100; } } } if (debug) printk(KERN_INFO "apm: Connection version %d.%d\n", (apm_info.connection_version >> 8) & 0xff, apm_info.connection_version & 0xff); #ifdef CONFIG_APM_DO_ENABLE if (apm_info.bios.flags & APM_BIOS_DISABLED) { /* * This call causes my NEC UltraLite Versa 33/C to hang if it * is booted with PM disabled but not in the docking station. * Unfortunate ... */ error = apm_enable_power_management(1); if (error) { apm_error("enable power management", error); return -1; } } #endif if ((apm_info.bios.flags & APM_BIOS_DISENGAGED) && (apm_info.connection_version > 0x0100)) { error = apm_engage_power_management(APM_DEVICE_ALL, 1); if (error) { apm_error("engage power management", error); return -1; } } if (debug && (num_online_cpus() == 1 || smp)) { error = apm_get_power_status(&bx, &cx, &dx); if (error) printk(KERN_INFO "apm: power status not available\n"); else { switch ((bx >> 8) & 0xff) { case 0: power_stat = "off line"; break; case 1: power_stat = "on line"; break; case 2: power_stat = "on backup power"; break; default: power_stat = "unknown"; break; } switch (bx & 0xff) { case 0: bat_stat = "high"; break; case 1: bat_stat = "low"; break; case 2: bat_stat = "critical"; break; case 3: bat_stat = "charging"; break; default: bat_stat = "unknown"; break; } printk(KERN_INFO "apm: AC %s, battery status %s, battery life ", power_stat, bat_stat); if ((cx & 0xff) == 0xff) printk("unknown\n"); else printk("%d%%\n", cx & 0xff); if 
(apm_info.connection_version > 0x100) { printk(KERN_INFO "apm: battery flag 0x%02x, battery life ", (cx >> 8) & 0xff); if (dx == 0xffff) printk("unknown\n"); else printk("%d %s\n", dx & 0x7fff, (dx & 0x8000) ? "minutes" : "seconds"); } } } /* Install our power off handler.. */ if (power_off) pm_power_off = apm_power_off; if (num_online_cpus() == 1 || smp) { #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) console_blank_hook = apm_console_blank; #endif apm_mainloop(); #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) console_blank_hook = NULL; #endif } return 0; } #ifndef MODULE static int __init apm_setup(char *str) { int invert; while ((str != NULL) && (*str != '\0')) { if (strncmp(str, "off", 3) == 0) apm_disabled = 1; if (strncmp(str, "on", 2) == 0) apm_disabled = 0; if ((strncmp(str, "bounce-interval=", 16) == 0) || (strncmp(str, "bounce_interval=", 16) == 0)) bounce_interval = simple_strtol(str + 16, NULL, 0); if ((strncmp(str, "idle-threshold=", 15) == 0) || (strncmp(str, "idle_threshold=", 15) == 0)) idle_threshold = simple_strtol(str + 15, NULL, 0); if ((strncmp(str, "idle-period=", 12) == 0) || (strncmp(str, "idle_period=", 12) == 0)) idle_period = simple_strtol(str + 12, NULL, 0); invert = (strncmp(str, "no-", 3) == 0) || (strncmp(str, "no_", 3) == 0); if (invert) str += 3; if (strncmp(str, "debug", 5) == 0) debug = !invert; if ((strncmp(str, "power-off", 9) == 0) || (strncmp(str, "power_off", 9) == 0)) power_off = !invert; if (strncmp(str, "smp", 3) == 0) { smp = !invert; idle_threshold = 100; } if ((strncmp(str, "allow-ints", 10) == 0) || (strncmp(str, "allow_ints", 10) == 0)) apm_info.allow_ints = !invert; if ((strncmp(str, "broken-psr", 10) == 0) || (strncmp(str, "broken_psr", 10) == 0)) apm_info.get_power_status_broken = !invert; if ((strncmp(str, "realmode-power-off", 18) == 0) || (strncmp(str, "realmode_power_off", 18) == 0)) apm_info.realmode_power_off = !invert; str = strchr(str, ','); if (str != NULL) str += strspn(str, ", 
\t"); } return 1; } __setup("apm=", apm_setup); #endif static const struct file_operations apm_bios_fops = { .owner = THIS_MODULE, .read = do_read, .poll = do_poll, .unlocked_ioctl = do_ioctl, .open = do_open, .release = do_release, .llseek = noop_llseek, }; static struct miscdevice apm_device = { APM_MINOR_DEV, "apm_bios", &apm_bios_fops }; /* Simple "print if true" callback */ static int __init print_if_true(const struct dmi_system_id *d) { printk("%s\n", d->ident); return 0; } /* * Some Bioses enable the PS/2 mouse (touchpad) at resume, even if it was * disabled before the suspend. Linux used to get terribly confused by that. */ static int __init broken_ps2_resume(const struct dmi_system_id *d) { printk(KERN_INFO "%s machine detected. Mousepad Resume Bug " "workaround hopefully not needed.\n", d->ident); return 0; } /* Some bioses have a broken protected mode poweroff and need to use realmode */ static int __init set_realmode_power_off(const struct dmi_system_id *d) { if (apm_info.realmode_power_off == 0) { apm_info.realmode_power_off = 1; printk(KERN_INFO "%s bios detected. " "Using realmode poweroff only.\n", d->ident); } return 0; } /* Some laptops require interrupts to be enabled during APM calls */ static int __init set_apm_ints(const struct dmi_system_id *d) { if (apm_info.allow_ints == 0) { apm_info.allow_ints = 1; printk(KERN_INFO "%s machine detected. " "Enabling interrupts during APM calls.\n", d->ident); } return 0; } /* Some APM bioses corrupt memory or just plain do not work */ static int __init apm_is_horked(const struct dmi_system_id *d) { if (apm_info.disabled == 0) { apm_info.disabled = 1; printk(KERN_INFO "%s machine detected. " "Disabling APM.\n", d->ident); } return 0; } static int __init apm_is_horked_d850md(const struct dmi_system_id *d) { if (apm_info.disabled == 0) { apm_info.disabled = 1; printk(KERN_INFO "%s machine detected. 
" "Disabling APM.\n", d->ident); printk(KERN_INFO "This bug is fixed in bios P15 which is available for\n"); printk(KERN_INFO "download from support.intel.com\n"); } return 0; } /* Some APM bioses hang on APM idle calls */ static int __init apm_likes_to_melt(const struct dmi_system_id *d) { if (apm_info.forbid_idle == 0) { apm_info.forbid_idle = 1; printk(KERN_INFO "%s machine detected. " "Disabling APM idle calls.\n", d->ident); } return 0; } /* * Check for clue free BIOS implementations who use * the following QA technique * * [ Write BIOS Code ]<------ * | ^ * < Does it Compile >----N-- * |Y ^ * < Does it Boot Win98 >-N-- * |Y * [Ship It] * * Phoenix A04 08/24/2000 is known bad (Dell Inspiron 5000e) * Phoenix A07 09/29/2000 is known good (Dell Inspiron 5000) */ static int __init broken_apm_power(const struct dmi_system_id *d) { apm_info.get_power_status_broken = 1; printk(KERN_WARNING "BIOS strings suggest APM bugs, " "disabling power status reporting.\n"); return 0; } /* * This bios swaps the APM minute reporting bytes over (Many sony laptops * have this problem). 
*/ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d) { apm_info.get_power_status_swabinminutes = 1; printk(KERN_WARNING "BIOS strings suggest APM reports battery life " "in minutes and wrong byte order.\n"); return 0; } static struct dmi_system_id __initdata apm_dmi_table[] = { { print_if_true, KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.", { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), DMI_MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), }, }, { /* Handle problems with APM on the C600 */ broken_ps2_resume, "Dell Latitude C600", { DMI_MATCH(DMI_SYS_VENDOR, "Dell"), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C600"), }, }, { /* Allow interrupts during suspend on Dell Latitude laptops*/ set_apm_ints, "Dell Latitude", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C510"), } }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A11"), }, }, { /* Allow interrupts during suspend on Dell Inspiron laptops*/ set_apm_ints, "Dell Inspiron", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"), }, }, { /* Handle problems with APM on Inspiron 5000e */ broken_apm_power, "Dell Inspiron 5000e", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A04"), DMI_MATCH(DMI_BIOS_DATE, "08/24/2000"), }, }, { /* Handle problems with APM on Inspiron 2500 */ broken_apm_power, "Dell Inspiron 2500", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A12"), DMI_MATCH(DMI_BIOS_DATE, "02/04/2002"), }, }, { /* APM crashes */ apm_is_horked, "Dell Dimension 4100", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), DMI_MATCH(DMI_BIOS_VENDOR, 
"Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "A11"), }, }, { /* Allow interrupts during suspend on Compaq Laptops*/ set_apm_ints, "Compaq 12XL125", { DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), DMI_MATCH(DMI_PRODUCT_NAME, "Compaq PC"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "4.06"), }, }, { /* Allow interrupts during APM or the clock goes slow */ set_apm_ints, "ASUSTeK", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "L8400K series Notebook PC"), }, }, { /* APM blows on shutdown */ apm_is_horked, "ABIT KX7-333[R]", { DMI_MATCH(DMI_BOARD_VENDOR, "ABIT"), DMI_MATCH(DMI_BOARD_NAME, "VT8367-8233A (KX7-333[R])"), }, }, { /* APM crashes */ apm_is_horked, "Trigem Delhi3", { DMI_MATCH(DMI_SYS_VENDOR, "TriGem Computer, Inc"), DMI_MATCH(DMI_PRODUCT_NAME, "Delhi3"), }, }, { /* APM crashes */ apm_is_horked, "Fujitsu-Siemens", { DMI_MATCH(DMI_BIOS_VENDOR, "hoenix/FUJITSU SIEMENS"), DMI_MATCH(DMI_BIOS_VERSION, "Version1.01"), }, }, { /* APM crashes */ apm_is_horked_d850md, "Intel D850MD", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "MV85010A.86A.0016.P07.0201251536"), }, }, { /* APM crashes */ apm_is_horked, "Intel D810EMO", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "MO81010A.86A.0008.P04.0004170800"), }, }, { /* APM crashes */ apm_is_horked, "Dell XPS-Z", { DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_BIOS_VERSION, "A11"), DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), }, }, { /* APM crashes */ apm_is_horked, "Sharp PC-PJ/AX", { DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), DMI_MATCH(DMI_PRODUCT_NAME, "PC-PJ/AX"), DMI_MATCH(DMI_BIOS_VENDOR, "SystemSoft"), DMI_MATCH(DMI_BIOS_VERSION, "Version R2.08"), }, }, { /* APM crashes */ apm_is_horked, "Dell Inspiron 2500", { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"), DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "A11"), 
}, }, { /* APM idle hangs */ apm_likes_to_melt, "Jabil AMD", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), }, }, { /* APM idle hangs */ apm_likes_to_melt, "AMI Bios", { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), }, }, { /* Handle problems with APM on Sony Vaio PCG-N505X(DE) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206H"), DMI_MATCH(DMI_BIOS_DATE, "08/23/99"), }, }, { /* Handle problems with APM on Sony Vaio PCG-N505VX */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "W2K06H0"), DMI_MATCH(DMI_BIOS_DATE, "02/03/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-XG29 */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0117A0"), DMI_MATCH(DMI_BIOS_DATE, "04/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0121Z1"), DMI_MATCH(DMI_BIOS_DATE, "05/11/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600NE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "WME01Z1"), DMI_MATCH(DMI_BIOS_DATE, "08/11/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z600LEK(DE) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0206Z3"), DMI_MATCH(DMI_BIOS_DATE, "12/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203D0"), DMI_MATCH(DMI_BIOS_DATE, "05/12/00"), }, 
}, { /* Handle problems with APM on Sony Vaio PCG-Z505LS */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0203Z3"), DMI_MATCH(DMI_BIOS_DATE, "08/25/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-Z505LS (with updated BIOS) */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0209Z3"), DMI_MATCH(DMI_BIOS_DATE, "05/12/01"), }, }, { /* Handle problems with APM on Sony Vaio PCG-F104K */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204K2"), DMI_MATCH(DMI_BIOS_DATE, "08/28/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VN/C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0208P1"), DMI_MATCH(DMI_BIOS_DATE, "11/09/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "R0204P1"), DMI_MATCH(DMI_BIOS_DATE, "09/12/00"), }, }, { /* Handle problems with APM on Sony Vaio PCG-C1VE */ swab_apm_power_in_minutes, "Sony VAIO", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION, "WXPO1Z3"), DMI_MATCH(DMI_BIOS_DATE, "10/26/01"), }, }, { /* broken PM poweroff bios */ set_realmode_power_off, "Award Software v4.60 PGMA", { DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), DMI_MATCH(DMI_BIOS_VERSION, "4.60 PGMA"), DMI_MATCH(DMI_BIOS_DATE, "134526184"), }, }, /* Generic per vendor APM settings */ { /* Allow interrupts during suspend on IBM laptops */ set_apm_ints, "IBM", { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, }, { } }; /* * Just start the APM thread. 
We do NOT want to do APM BIOS * calls from anything but the APM thread, if for no other reason * than the fact that we don't trust the APM BIOS. This way, * most common APM BIOS problems that lead to protection errors * etc will have at least some level of being contained... * * In short, if something bad happens, at least we have a choice * of just killing the apm thread.. */ static int __init apm_init(void) { struct desc_struct *gdt; int err; dmi_check_system(apm_dmi_table); if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) { printk(KERN_INFO "apm: BIOS not found.\n"); return -ENODEV; } printk(KERN_INFO "apm: BIOS version %d.%d Flags 0x%02x (Driver version %s)\n", ((apm_info.bios.version >> 8) & 0xff), (apm_info.bios.version & 0xff), apm_info.bios.flags, driver_version); if ((apm_info.bios.flags & APM_32_BIT_SUPPORT) == 0) { printk(KERN_INFO "apm: no 32 bit BIOS support\n"); return -ENODEV; } if (allow_ints) apm_info.allow_ints = 1; if (broken_psr) apm_info.get_power_status_broken = 1; if (realmode_power_off) apm_info.realmode_power_off = 1; /* User can override, but default is to trust DMI */ if (apm_disabled != -1) apm_info.disabled = apm_disabled; /* * Fix for the Compaq Contura 3/25c which reports BIOS version 0.1 * but is reportedly a 1.0 BIOS. 
*/ if (apm_info.bios.version == 0x001) apm_info.bios.version = 0x100; /* BIOS < 1.2 doesn't set cseg_16_len */ if (apm_info.bios.version < 0x102) apm_info.bios.cseg_16_len = 0; /* 64k */ if (debug) { printk(KERN_INFO "apm: entry %x:%x cseg16 %x dseg %x", apm_info.bios.cseg, apm_info.bios.offset, apm_info.bios.cseg_16, apm_info.bios.dseg); if (apm_info.bios.version > 0x100) printk(" cseg len %x, dseg len %x", apm_info.bios.cseg_len, apm_info.bios.dseg_len); if (apm_info.bios.version > 0x101) printk(" cseg16 len %x", apm_info.bios.cseg_16_len); printk("\n"); } if (apm_info.disabled) { printk(KERN_NOTICE "apm: disabled on user request.\n"); return -ENODEV; } if ((num_online_cpus() > 1) && !power_off && !smp) { printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n"); apm_info.disabled = 1; return -ENODEV; } if (!acpi_disabled) { printk(KERN_NOTICE "apm: overridden by ACPI.\n"); apm_info.disabled = 1; return -ENODEV; } /* * Set up the long jump entry point to the APM BIOS, which is called * from inline assembly. */ apm_bios_entry.offset = apm_info.bios.offset; apm_bios_entry.segment = APM_CS; /* * The APM 1.1 BIOS is supposed to provide limit information that it * recognizes. Many machines do this correctly, but many others do * not restrict themselves to their claimed limit. When this happens, * they will cause a segmentation violation in the kernel at boot time. * Most BIOS's, however, will respect a 64k limit, so we use that. * * Note we only set APM segments on CPU zero, since we pin the APM * code to that CPU. 
*/ gdt = get_cpu_gdt_table(0); set_desc_base(&gdt[APM_CS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); set_desc_base(&gdt[APM_CS_16 >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); set_desc_base(&gdt[APM_DS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); proc_create("apm", 0, NULL, &apm_file_ops); kapmd_task = kthread_create(apm, NULL, "kapmd"); if (IS_ERR(kapmd_task)) { printk(KERN_ERR "apm: disabled - Unable to start kernel " "thread.\n"); err = PTR_ERR(kapmd_task); kapmd_task = NULL; remove_proc_entry("apm", NULL); return err; } wake_up_process(kapmd_task); if (num_online_cpus() > 1 && !smp) { printk(KERN_NOTICE "apm: disabled - APM is not SMP safe (power off active).\n"); return 0; } /* * Note we don't actually care if the misc_device cannot be registered. * this driver can do its job without it, even if userspace can't * control it. just log the error */ if (misc_register(&apm_device)) printk(KERN_WARNING "apm: Could not register misc device.\n"); if (HZ != 100) idle_period = (idle_period * HZ) / 100; if (idle_threshold < 100) { original_pm_idle = pm_idle; pm_idle = apm_cpu_idle; set_pm_idle = 1; } return 0; } static void __exit apm_exit(void) { int error; if (set_pm_idle) { pm_idle = original_pm_idle; /* * We are about to unload the current idle thread pm callback * (pm_idle), Wait for all processors to update cached/local * copies of pm_idle before proceeding. 
*/ cpu_idle_wait(); } if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) && (apm_info.connection_version > 0x0100)) { error = apm_engage_power_management(APM_DEVICE_ALL, 0); if (error) apm_error("disengage power management", error); } misc_deregister(&apm_device); remove_proc_entry("apm", NULL); if (power_off) pm_power_off = NULL; if (kapmd_task) { kthread_stop(kapmd_task); kapmd_task = NULL; } } module_init(apm_init); module_exit(apm_exit); MODULE_AUTHOR("Stephen Rothwell"); MODULE_DESCRIPTION("Advanced Power Management"); MODULE_LICENSE("GPL"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Enable debug mode"); module_param(power_off, bool, 0444); MODULE_PARM_DESC(power_off, "Enable power off"); module_param(bounce_interval, int, 0444); MODULE_PARM_DESC(bounce_interval, "Set the number of ticks to ignore suspend bounces"); module_param(allow_ints, bool, 0444); MODULE_PARM_DESC(allow_ints, "Allow interrupts during BIOS calls"); module_param(broken_psr, bool, 0444); MODULE_PARM_DESC(broken_psr, "BIOS has a broken GetPowerStatus call"); module_param(realmode_power_off, bool, 0444); MODULE_PARM_DESC(realmode_power_off, "Switch to real mode before powering off"); module_param(idle_threshold, int, 0444); MODULE_PARM_DESC(idle_threshold, "System idle percentage above which to make APM BIOS idle calls"); module_param(idle_period, int, 0444); MODULE_PARM_DESC(idle_period, "Period (in sec/100) over which to caculate the idle percentage"); module_param(smp, bool, 0444); MODULE_PARM_DESC(smp, "Set this to enable APM use on an SMP platform. Use with caution on older systems"); MODULE_ALIAS_MISCDEV(APM_MINOR_DEV);
gpl-2.0
ktoonsez/KTSGS5
drivers/cpufreq/powernow-k7.c
4921
17534
/* * AMD K7 Powernow driver. * (C) 2003 Dave Jones on behalf of SuSE Labs. * (C) 2003-2004 Dave Jones <davej@redhat.com> * * Licensed under the terms of the GNU GPL License version 2. * Based upon datasheets & sample CPUs kindly provided by AMD. * * Errata 5: * CPU may fail to execute a FID/VID change in presence of interrupt. * - We cli/sti on stepping A0 CPUs around the FID/VID transition. * Errata 15: * CPU with half frequency multipliers may hang upon wakeup from disconnect. * - We disable half multipliers if ACPI is used on A0 stepping CPUs. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/dmi.h> #include <linux/timex.h> #include <linux/io.h> #include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */ #include <asm/msr.h> #include <asm/cpu_device_id.h> #ifdef CONFIG_X86_POWERNOW_K7_ACPI #include <linux/acpi.h> #include <acpi/processor.h> #endif #include "powernow-k7.h" #define PFX "powernow: " struct psb_s { u8 signature[10]; u8 tableversion; u8 flags; u16 settlingtime; u8 reserved1; u8 numpst; }; struct pst_s { u32 cpuid; u8 fsbspeed; u8 maxfid; u8 startvid; u8 numpstates; }; #ifdef CONFIG_X86_POWERNOW_K7_ACPI union powernow_acpi_control_t { struct { unsigned long fid:5, vid:5, sgtc:20, res1:2; } bits; unsigned long val; }; #endif /* divide by 1000 to get VCore voltage in V. */ static const int mobile_vid_table[32] = { 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, 1075, 1050, 1025, 1000, 975, 950, 925, 0, }; /* divide by 10 to get FID. 
*/ static const int fid_codes[32] = { 110, 115, 120, 125, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 30, 190, 40, 200, 130, 135, 140, 210, 150, 225, 160, 165, 170, 180, -1, -1, }; /* This parameter is used in order to force ACPI instead of legacy method for * configuration purpose. */ static int acpi_force; static struct cpufreq_frequency_table *powernow_table; static unsigned int can_scale_bus; static unsigned int can_scale_vid; static unsigned int minimum_speed = -1; static unsigned int maximum_speed; static unsigned int number_scales; static unsigned int fsb; static unsigned int latency; static char have_a0; static int check_fsb(unsigned int fsbspeed) { int delta; unsigned int f = fsb / 1000; delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed; return delta < 5; } static const struct x86_cpu_id powernow_k7_cpuids[] = { { X86_VENDOR_AMD, 6, }, {} }; MODULE_DEVICE_TABLE(x86cpu, powernow_k7_cpuids); static int check_powernow(void) { struct cpuinfo_x86 *c = &cpu_data(0); unsigned int maxei, eax, ebx, ecx, edx; if (!x86_match_cpu(powernow_k7_cpuids)) return 0; /* Get maximum capabilities */ maxei = cpuid_eax(0x80000000); if (maxei < 0x80000007) { /* Any powernow info ? */ #ifdef MODULE printk(KERN_INFO PFX "No powernow capabilities detected\n"); #endif return 0; } if ((c->x86_model == 6) && (c->x86_mask == 0)) { printk(KERN_INFO PFX "K7 660[A0] core detected, " "enabling errata workarounds\n"); have_a0 = 1; } cpuid(0x80000007, &eax, &ebx, &ecx, &edx); /* Check we can actually do something before we say anything.*/ if (!(edx & (1 << 1 | 1 << 2))) return 0; printk(KERN_INFO PFX "PowerNOW! Technology present. 
Can scale: "); if (edx & 1 << 1) { printk("frequency"); can_scale_bus = 1; } if ((edx & (1 << 1 | 1 << 2)) == 0x6) printk(" and "); if (edx & 1 << 2) { printk("voltage"); can_scale_vid = 1; } printk(".\n"); return 1; } #ifdef CONFIG_X86_POWERNOW_K7_ACPI static void invalidate_entry(unsigned int entry) { powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; } #endif static int get_ranges(unsigned char *pst) { unsigned int j; unsigned int speed; u8 fid, vid; powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); if (!powernow_table) return -ENOMEM; for (j = 0 ; j < number_scales; j++) { fid = *pst++; powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10; powernow_table[j].index = fid; /* lower 8 bits */ speed = powernow_table[j].frequency; if ((fid_codes[fid] % 10) == 5) { #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (have_a0 == 1) invalidate_entry(j); #endif } if (speed < minimum_speed) minimum_speed = speed; if (speed > maximum_speed) maximum_speed = speed; vid = *pst++; powernow_table[j].index |= (vid << 8); /* upper 8 bits */ pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000, vid, mobile_vid_table[vid]/1000, mobile_vid_table[vid]%1000); } powernow_table[number_scales].frequency = CPUFREQ_TABLE_END; powernow_table[number_scales].index = 0; return 0; } static void change_FID(int fid) { union msr_fidvidctl fidvidctl; rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.FID != fid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.FID = fid; fidvidctl.bits.VIDC = 0; fidvidctl.bits.FIDC = 1; wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); } } static void change_VID(int vid) { union msr_fidvidctl fidvidctl; rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); if (fidvidctl.bits.VID != vid) { fidvidctl.bits.SGTC = latency; fidvidctl.bits.VID = vid; fidvidctl.bits.FIDC = 0; fidvidctl.bits.VIDC = 1; wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); } } static void 
change_speed(unsigned int index) { u8 fid, vid; struct cpufreq_freqs freqs; union msr_fidvidstatus fidvidstatus; int cfid; /* fid are the lower 8 bits of the index we stored into * the cpufreq frequency table in powernow_decode_bios, * vid are the upper 8 bits. */ fid = powernow_table[index].index & 0xFF; vid = (powernow_table[index].index & 0xFF00) >> 8; freqs.cpu = 0; rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; freqs.old = fsb * fid_codes[cfid] / 10; freqs.new = powernow_table[index].frequency; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); /* Now do the magic poking into the MSRs. */ if (have_a0 == 1) /* A0 errata 5 */ local_irq_disable(); if (freqs.old > freqs.new) { /* Going down, so change FID first */ change_FID(fid); change_VID(vid); } else { /* Going up, so change VID first */ change_VID(vid); change_FID(fid); } if (have_a0 == 1) local_irq_enable(); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } #ifdef CONFIG_X86_POWERNOW_K7_ACPI static struct acpi_processor_performance *acpi_processor_perf; static int powernow_acpi_init(void) { int i; int retval = 0; union powernow_acpi_control_t pc; if (acpi_processor_perf != NULL && powernow_table != NULL) { retval = -EINVAL; goto err0; } acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance), GFP_KERNEL); if (!acpi_processor_perf) { retval = -ENOMEM; goto err0; } if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, GFP_KERNEL)) { retval = -ENOMEM; goto err05; } if (acpi_processor_register_performance(acpi_processor_perf, 0)) { retval = -EIO; goto err1; } if (acpi_processor_perf->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { retval = -ENODEV; goto err2; } if (acpi_processor_perf->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) { retval = -ENODEV; goto err2; } number_scales = acpi_processor_perf->state_count; if (number_scales < 2) { retval = -ENODEV; goto err2; } powernow_table = kzalloc((sizeof(struct 
cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL); if (!powernow_table) { retval = -ENOMEM; goto err2; } pc.val = (unsigned long) acpi_processor_perf->states[0].control; for (i = 0; i < number_scales; i++) { u8 fid, vid; struct acpi_processor_px *state = &acpi_processor_perf->states[i]; unsigned int speed, speed_mhz; pc.val = (unsigned long) state->control; pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", i, (u32) state->core_frequency, (u32) state->power, (u32) state->transition_latency, (u32) state->control, pc.bits.sgtc); vid = pc.bits.vid; fid = pc.bits.fid; powernow_table[i].frequency = fsb * fid_codes[fid] / 10; powernow_table[i].index = fid; /* lower 8 bits */ powernow_table[i].index |= (vid << 8); /* upper 8 bits */ speed = powernow_table[i].frequency; speed_mhz = speed / 1000; /* processor_perflib will multiply the MHz value by 1000 to * get a KHz value (e.g. 1266000). However, powernow-k7 works * with true KHz values (e.g. 1266768). To ensure that all * powernow frequencies are available, we must ensure that * ACPI doesn't restrict them, so we round up the MHz value * to ensure that perflib's computed KHz value is greater than * or equal to powernow's KHz value. 
*/ if (speed % 1000 > 0) speed_mhz++; if ((fid_codes[fid] % 10) == 5) { if (have_a0 == 1) invalidate_entry(i); } pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed_mhz, vid, mobile_vid_table[vid]/1000, mobile_vid_table[vid]%1000); if (state->core_frequency != speed_mhz) { state->core_frequency = speed_mhz; pr_debug(" Corrected ACPI frequency to %d\n", speed_mhz); } if (latency < pc.bits.sgtc) latency = pc.bits.sgtc; if (speed < minimum_speed) minimum_speed = speed; if (speed > maximum_speed) maximum_speed = speed; } powernow_table[i].frequency = CPUFREQ_TABLE_END; powernow_table[i].index = 0; /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); return 0; err2: acpi_processor_unregister_performance(acpi_processor_perf, 0); err1: free_cpumask_var(acpi_processor_perf->shared_cpu_map); err05: kfree(acpi_processor_perf); err0: printk(KERN_WARNING PFX "ACPI perflib can not be used on " "this platform\n"); acpi_processor_perf = NULL; return retval; } #else static int powernow_acpi_init(void) { printk(KERN_INFO PFX "no support for ACPI processor found." 
" Please recompile your kernel with ACPI processor\n"); return -EINVAL; } #endif static void print_pst_entry(struct pst_s *pst, unsigned int j) { pr_debug("PST:%d (@%p)\n", j, pst); pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); } static int powernow_decode_bios(int maxfid, int startvid) { struct psb_s *psb; struct pst_s *pst; unsigned int i, j; unsigned char *p; unsigned int etuple; unsigned int ret; etuple = cpuid_eax(0x80000001); for (i = 0xC0000; i < 0xffff0 ; i += 16) { p = phys_to_virt(i); if (memcmp(p, "AMDK7PNOW!", 10) == 0) { pr_debug("Found PSB header at %p\n", p); psb = (struct psb_s *) p; pr_debug("Table version: 0x%x\n", psb->tableversion); if (psb->tableversion != 0x12) { printk(KERN_INFO PFX "Sorry, only v1.2 tables" " supported right now\n"); return -ENODEV; } pr_debug("Flags: 0x%x\n", psb->flags); if ((psb->flags & 1) == 0) pr_debug("Mobile voltage regulator\n"); else pr_debug("Desktop voltage regulator\n"); latency = psb->settlingtime; if (latency < 100) { printk(KERN_INFO PFX "BIOS set settling time " "to %d microseconds. " "Should be at least 100. " "Correcting.\n", latency); latency = 100; } pr_debug("Settling Time: %d microseconds.\n", psb->settlingtime); pr_debug("Has %d PST tables. 
(Only dumping ones " "relevant to this CPU).\n", psb->numpst); p += sizeof(struct psb_s); pst = (struct pst_s *) p; for (j = 0; j < psb->numpst; j++) { pst = (struct pst_s *) p; number_scales = pst->numpstates; if ((etuple == pst->cpuid) && check_fsb(pst->fsbspeed) && (maxfid == pst->maxfid) && (startvid == pst->startvid)) { print_pst_entry(pst, j); p = (char *)pst + sizeof(struct pst_s); ret = get_ranges(p); return ret; } else { unsigned int k; p = (char *)pst + sizeof(struct pst_s); for (k = 0; k < number_scales; k++) p += 2; } } printk(KERN_INFO PFX "No PST tables match this cpuid " "(0x%x)\n", etuple); printk(KERN_INFO PFX "This is indicative of a broken " "BIOS.\n"); return -EINVAL; } p++; } return -ENODEV; } static int powernow_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate; if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, relation, &newstate)) return -EINVAL; change_speed(newstate); return 0; } static int powernow_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, powernow_table); } /* * We use the fact that the bus frequency is somehow * a multiple of 100000/3 khz, then we compute sgtc according * to this multiple. * That way, we match more how AMD thinks all of that work. * We will then get the same kind of behaviour already tested under * the "well-known" other OS. 
*/ static int __cpuinit fixup_sgtc(void) { unsigned int sgtc; unsigned int m; m = fsb / 3333; if ((m % 10) >= 5) m += 5; m /= 10; sgtc = 100 * m * latency; sgtc = sgtc / 3; if (sgtc > 0xfffff) { printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc); sgtc = 0xfffff; } return sgtc; } static unsigned int powernow_get(unsigned int cpu) { union msr_fidvidstatus fidvidstatus; unsigned int cfid; if (cpu) return 0; rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); cfid = fidvidstatus.bits.CFID; return fsb * fid_codes[cfid] / 10; } static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d) { printk(KERN_WARNING PFX "%s laptop with broken PST tables in BIOS detected.\n", d->ident); printk(KERN_WARNING PFX "You need to downgrade to 3A21 (09/09/2002), or try a newer " "BIOS than 3A71 (01/20/2003)\n"); printk(KERN_WARNING PFX "cpufreq scaling has been disabled as a result of this.\n"); return 0; } /* * Some Athlon laptops have really fucked PST tables. * A BIOS update is all that can save them. * Mention this, and disable cpufreq. */ static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = { { .callback = acer_cpufreq_pst, .ident = "Acer Aspire", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"), DMI_MATCH(DMI_BIOS_VERSION, "3A71"), }, }, { } }; static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) { union msr_fidvidstatus fidvidstatus; int result; if (policy->cpu != 0) return -ENODEV; rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); recalibrate_cpu_khz(); fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID]; if (!fsb) { printk(KERN_WARNING PFX "can not determine bus frequency\n"); return -EINVAL; } pr_debug("FSB: %3dMHz\n", fsb/1000); if (dmi_check_system(powernow_dmi_table) || acpi_force) { printk(KERN_INFO PFX "PSB/PST known to be broken. 
" "Trying ACPI instead\n"); result = powernow_acpi_init(); } else { result = powernow_decode_bios(fidvidstatus.bits.MFID, fidvidstatus.bits.SVID); if (result) { printk(KERN_INFO PFX "Trying ACPI perflib\n"); maximum_speed = 0; minimum_speed = -1; latency = 0; result = powernow_acpi_init(); if (result) { printk(KERN_INFO PFX "ACPI and legacy methods failed\n"); } } else { /* SGTC use the bus clock as timer */ latency = fixup_sgtc(); printk(KERN_INFO PFX "SGTC: %d\n", latency); } } if (result) return result; printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", minimum_speed/1000, maximum_speed/1000); policy->cpuinfo.transition_latency = cpufreq_scale(2000000UL, fsb, latency); policy->cur = powernow_get(0); cpufreq_frequency_table_get_attr(powernow_table, policy->cpu); return cpufreq_frequency_table_cpuinfo(policy, powernow_table); } static int powernow_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (acpi_processor_perf) { acpi_processor_unregister_performance(acpi_processor_perf, 0); free_cpumask_var(acpi_processor_perf->shared_cpu_map); kfree(acpi_processor_perf); } #endif kfree(powernow_table); return 0; } static struct freq_attr *powernow_table_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver powernow_driver = { .verify = powernow_verify, .target = powernow_target, .get = powernow_get, #ifdef CONFIG_X86_POWERNOW_K7_ACPI .bios_limit = acpi_processor_get_bios_limit, #endif .init = powernow_cpu_init, .exit = powernow_cpu_exit, .name = "powernow-k7", .owner = THIS_MODULE, .attr = powernow_table_attr, }; static int __init powernow_init(void) { if (check_powernow() == 0) return -ENODEV; return cpufreq_register_driver(&powernow_driver); } static void __exit powernow_exit(void) { cpufreq_unregister_driver(&powernow_driver); } module_param(acpi_force, int, 0444); MODULE_PARM_DESC(acpi_force, "Force ACPI to be used."); 
/* Module metadata for modinfo / license enforcement. */
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
MODULE_LICENSE("GPL");

/*
 * Registered via late_initcall rather than module_init — presumably so
 * that core subsystems (e.g. the ACPI processor driver this code can
 * fall back on) are up before probing; TODO confirm against git history.
 */
late_initcall(powernow_init);
module_exit(powernow_exit);
gpl-2.0
Motorhead1991/android_kernel_blu_studio5qcom
drivers/staging/media/as102/as10x_cmd_cfg.c
5177
5836
/* * Abilis Systems Single DVB-T Receiver * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include "as102_drv.h" #include "as10x_types.h" #include "as10x_cmd.h" /***************************/ /* FUNCTION DEFINITION */ /***************************/ /** * as10x_cmd_get_context - Send get context command to AS10x * @adap: pointer to AS10x bus adapter * @tag: context tag * @pvalue: pointer where to store context value read * * Return 0 on success or negative value in case of error. 
*/
int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
			  uint32_t *pvalue)
{
	int ret;
	struct as10x_cmd_t *cmd = adap->cmd;
	struct as10x_cmd_t *rsp = adap->rsp;

	ENTER();

	/* Build the command header with a fresh transaction id. */
	as10x_cmd_build(cmd, (++adap->cmd_xid),
			sizeof(cmd->body.context.req));

	/* Request: read back the context value stored under 'tag'. */
	cmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT);
	cmd->body.context.req.tag = cpu_to_le16(tag);
	cmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA);

	/* Hand the command over the bus, if a transfer op is attached. */
	if (adap->ops->xfer_cmd)
		ret = adap->ops->xfer_cmd(adap,
				(uint8_t *) cmd,
				sizeof(cmd->body.context.req) + HEADER_SIZE,
				(uint8_t *) rsp,
				sizeof(rsp->body.context.rsp) + HEADER_SIZE);
	else
		ret = AS10X_CMD_ERROR;

	if (ret < 0)
		goto out;

	/*
	 * Context commands do not follow the common response layout,
	 * so the dedicated context parser must be used here.
	 */
	ret = as10x_context_rsp_parse(rsp, CONTROL_PROC_CONTEXT_RSP);
	if (ret == 0) {
		/* Response OK -> returned context data is always 32 bits. */
		*pvalue = le32_to_cpu(rsp->body.context.rsp.reg_val.u.value32);
	}
out:
	LEAVE();
	return ret;
}

/**
 * as10x_cmd_set_context - send set context command to AS10x
 * @adap: pointer to AS10x bus adapter
 * @tag: context tag
 * @value: value to set in context
 *
 * Return 0 on success or negative value in case of error.
*/
int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
			  uint32_t value)
{
	int ret;
	struct as10x_cmd_t *cmd = adap->cmd;
	struct as10x_cmd_t *rsp = adap->rsp;

	ENTER();

	/* Build the command header with a fresh transaction id. */
	as10x_cmd_build(cmd, (++adap->cmd_xid),
			sizeof(cmd->body.context.req));

	/* Request: store 'value' into the context slot named by 'tag'. */
	cmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT);
	/* cmd->body.context.req.reg_val.mode initialization is not required */
	cmd->body.context.req.reg_val.u.value32 = cpu_to_le32(value);
	cmd->body.context.req.tag = cpu_to_le16(tag);
	cmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA);

	/* Hand the command over the bus, if a transfer op is attached. */
	if (adap->ops->xfer_cmd)
		ret = adap->ops->xfer_cmd(adap,
				(uint8_t *) cmd,
				sizeof(cmd->body.context.req) + HEADER_SIZE,
				(uint8_t *) rsp,
				sizeof(rsp->body.context.rsp) + HEADER_SIZE);
	else
		ret = AS10X_CMD_ERROR;

	if (ret < 0)
		goto out;

	/*
	 * Context commands do not follow the common response layout,
	 * so the dedicated context parser must be used here.
	 */
	ret = as10x_context_rsp_parse(rsp, CONTROL_PROC_CONTEXT_RSP);
out:
	LEAVE();
	return ret;
}

/**
 * as10x_cmd_eLNA_change_mode - send eLNA change mode command to AS10x
 * @adap: pointer to AS10x bus adapter
 * @mode: mode selected:
 *	  - ON   : 0x0 => eLNA always ON
 *	  - OFF  : 0x1 => eLNA always OFF
 *	  - AUTO : 0x2 => eLNA follows hysteresis parameters
 *		   to be ON or OFF
 *
 * Return 0 on success or negative value in case of error.
*/
int as10x_cmd_eLNA_change_mode(struct as10x_bus_adapter_t *adap, uint8_t mode)
{
	int ret;
	struct as10x_cmd_t *cmd = adap->cmd;
	struct as10x_cmd_t *rsp = adap->rsp;

	ENTER();

	/* Build the command header with a fresh transaction id. */
	as10x_cmd_build(cmd, (++adap->cmd_xid),
			sizeof(cmd->body.cfg_change_mode.req));

	/* Request: switch the external LNA into the requested mode. */
	cmd->body.cfg_change_mode.req.proc_id =
		cpu_to_le16(CONTROL_PROC_ELNA_CHANGE_MODE);
	cmd->body.cfg_change_mode.req.mode = mode;

	/* Hand the command over the bus, if a transfer op is attached. */
	if (adap->ops->xfer_cmd)
		ret = adap->ops->xfer_cmd(adap,
			(uint8_t *) cmd,
			sizeof(cmd->body.cfg_change_mode.req) + HEADER_SIZE,
			(uint8_t *) rsp,
			sizeof(rsp->body.cfg_change_mode.rsp) + HEADER_SIZE);
	else
		ret = AS10X_CMD_ERROR;

	if (ret < 0)
		goto out;

	/* This command uses the common response layout. */
	ret = as10x_rsp_parse(rsp, CONTROL_PROC_ELNA_CHANGE_MODE_RSP);
out:
	LEAVE();
	return ret;
}

/**
 * as10x_context_rsp_parse - Parse context command response
 * @prsp: pointer to AS10x command response buffer
 * @proc_id: id of the command
 *
 * Since the context command response does not follow the common
 * response, a specific parse function is required.
 * Return 0 on success or negative value in case of error.
 */
int as10x_context_rsp_parse(struct as10x_cmd_t *prsp, uint16_t proc_id)
{
	/* Reject on a device-reported error first. */
	if (prsp->body.context.rsp.error != 0)
		return AS10X_CMD_ERROR;

	/* Then make sure the response matches the command we sent. */
	if (le16_to_cpu(prsp->body.context.rsp.proc_id) != proc_id)
		return AS10X_CMD_ERROR;

	return 0;
}
gpl-2.0
12019/old_samsung-lt02wifi-kernel
drivers/staging/media/solo6x10/v4l2-enc.c
5177
47084
/* * Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com * Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-common.h> #include <media/videobuf-dma-sg.h> #include "solo6x10.h" #include "tw28.h" #include "solo6x10-jpeg.h" #define MIN_VID_BUFFERS 4 #define FRAME_BUF_SIZE (128 * 1024) #define MP4_QS 16 static int solo_enc_thread(void *data); extern unsigned video_nr; struct solo_enc_fh { struct solo_enc_dev *enc; u32 fmt; u16 rd_idx; u8 enc_on; enum solo_enc_types type; struct videobuf_queue vidq; struct list_head vidq_active; struct task_struct *kthread; struct p2m_desc desc[SOLO_NR_P2M_DESC]; }; static const u32 solo_user_ctrls[] = { V4L2_CID_BRIGHTNESS, V4L2_CID_CONTRAST, V4L2_CID_SATURATION, V4L2_CID_HUE, V4L2_CID_SHARPNESS, 0 }; static const u32 solo_mpeg_ctrls[] = { V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0 }; static const u32 solo_private_ctrls[] = { V4L2_CID_MOTION_ENABLE, V4L2_CID_MOTION_THRESHOLD, 0 }; static const u32 solo_fmtx_ctrls[] = { V4L2_CID_RDS_TX_RADIO_TEXT, 0 }; static const u32 *solo_ctrl_classes[] = { solo_user_ctrls, solo_mpeg_ctrls, solo_fmtx_ctrls, 
solo_private_ctrls, NULL }; static int solo_is_motion_on(struct solo_enc_dev *solo_enc) { struct solo_dev *solo_dev = solo_enc->solo_dev; u8 ch = solo_enc->ch; if (solo_dev->motion_mask & (1 << ch)) return 1; return 0; } static void solo_motion_toggle(struct solo_enc_dev *solo_enc, int on) { struct solo_dev *solo_dev = solo_enc->solo_dev; u8 ch = solo_enc->ch; spin_lock(&solo_enc->lock); if (on) solo_dev->motion_mask |= (1 << ch); else solo_dev->motion_mask &= ~(1 << ch); /* Do this regardless of if we are turning on or off */ solo_reg_write(solo_enc->solo_dev, SOLO_VI_MOT_CLEAR, 1 << solo_enc->ch); solo_enc->motion_detected = 0; solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(solo_dev->motion_mask) | (SOLO_MOTION_EXT_ADDR(solo_dev) >> 16)); if (solo_dev->motion_mask) solo_irq_on(solo_dev, SOLO_IRQ_MOTION); else solo_irq_off(solo_dev, SOLO_IRQ_MOTION); spin_unlock(&solo_enc->lock); } /* Should be called with solo_enc->lock held */ static void solo_update_mode(struct solo_enc_dev *solo_enc) { struct solo_dev *solo_dev = solo_enc->solo_dev; assert_spin_locked(&solo_enc->lock); solo_enc->interlaced = (solo_enc->mode & 0x08) ? 
1 : 0; solo_enc->bw_weight = max(solo_dev->fps / solo_enc->interval, 1); switch (solo_enc->mode) { case SOLO_ENC_MODE_CIF: solo_enc->width = solo_dev->video_hsize >> 1; solo_enc->height = solo_dev->video_vsize; break; case SOLO_ENC_MODE_D1: solo_enc->width = solo_dev->video_hsize; solo_enc->height = solo_dev->video_vsize << 1; solo_enc->bw_weight <<= 2; break; default: WARN(1, "mode is unknown\n"); } } /* Should be called with solo_enc->lock held */ static int solo_enc_on(struct solo_enc_fh *fh) { struct solo_enc_dev *solo_enc = fh->enc; u8 ch = solo_enc->ch; struct solo_dev *solo_dev = solo_enc->solo_dev; u8 interval; assert_spin_locked(&solo_enc->lock); if (fh->enc_on) return 0; solo_update_mode(solo_enc); /* Make sure to bw check on first reader */ if (!atomic_read(&solo_enc->readers)) { if (solo_enc->bw_weight > solo_dev->enc_bw_remain) return -EBUSY; else solo_dev->enc_bw_remain -= solo_enc->bw_weight; } fh->enc_on = 1; fh->rd_idx = solo_enc->solo_dev->enc_wr_idx; if (fh->type == SOLO_ENC_TYPE_EXT) solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(ch), 1); if (atomic_inc_return(&solo_enc->readers) > 1) return 0; /* Disable all encoding for this channel */ solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), 0); /* Common for both std and ext encoding */ solo_reg_write(solo_dev, SOLO_VE_CH_INTL(ch), solo_enc->interlaced ? 1 : 0); if (solo_enc->interlaced) interval = solo_enc->interval - 1; else interval = solo_enc->interval; /* Standard encoding only */ solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch), solo_enc->gop); solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp); solo_reg_write(solo_dev, SOLO_CAP_CH_INTV(ch), interval); /* Extended encoding only */ solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(ch), solo_enc->gop); solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp); solo_reg_write(solo_dev, SOLO_CAP_CH_INTV_E(ch), interval); /* Enables the standard encoder */ solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), solo_enc->mode); /* Settle down Beavis... 
*/ mdelay(10); return 0; } static void solo_enc_off(struct solo_enc_fh *fh) { struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; if (!fh->enc_on) return; if (fh->kthread) { kthread_stop(fh->kthread); fh->kthread = NULL; } solo_dev->enc_bw_remain += solo_enc->bw_weight; fh->enc_on = 0; if (atomic_dec_return(&solo_enc->readers) > 0) return; solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(solo_enc->ch), 0); solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(solo_enc->ch), 0); } static int solo_start_fh_thread(struct solo_enc_fh *fh) { struct solo_enc_dev *solo_enc = fh->enc; fh->kthread = kthread_run(solo_enc_thread, fh, SOLO6X10_NAME "_enc"); /* Oops, we had a problem */ if (IS_ERR(fh->kthread)) { spin_lock(&solo_enc->lock); solo_enc_off(fh); spin_unlock(&solo_enc->lock); return PTR_ERR(fh->kthread); } return 0; } static void enc_reset_gop(struct solo_dev *solo_dev, u8 ch) { BUG_ON(ch >= solo_dev->nr_chans); solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch), 1); solo_dev->v4l2_enc[ch]->reset_gop = 1; } static int enc_gop_reset(struct solo_dev *solo_dev, u8 ch, u8 vop) { BUG_ON(ch >= solo_dev->nr_chans); if (!solo_dev->v4l2_enc[ch]->reset_gop) return 0; if (vop) return 1; solo_dev->v4l2_enc[ch]->reset_gop = 0; solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch), solo_dev->v4l2_enc[ch]->gop); return 0; } static void enc_write_sg(struct scatterlist *sglist, void *buf, int size) { struct scatterlist *sg; u8 *src = buf; for (sg = sglist; sg && size > 0; sg = sg_next(sg)) { u8 *p = sg_virt(sg); size_t len = sg_dma_len(sg); int i; for (i = 0; i < len && size; i++) p[i] = *(src++); } } static int enc_get_mpeg_dma_sg(struct solo_dev *solo_dev, struct p2m_desc *desc, struct scatterlist *sglist, int skip, unsigned int off, unsigned int size) { int ret; if (off > SOLO_MP4E_EXT_SIZE(solo_dev)) return -EINVAL; if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev)) { return solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E, desc, 0, sglist, skip, SOLO_MP4E_EXT_ADDR(solo_dev) 
+ off, size); } /* Buffer wrap */ ret = solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E, desc, 0, sglist, skip, SOLO_MP4E_EXT_ADDR(solo_dev) + off, SOLO_MP4E_EXT_SIZE(solo_dev) - off); ret |= solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E, desc, 0, sglist, skip + SOLO_MP4E_EXT_SIZE(solo_dev) - off, SOLO_MP4E_EXT_ADDR(solo_dev), size + off - SOLO_MP4E_EXT_SIZE(solo_dev)); return ret; } static int enc_get_mpeg_dma_t(struct solo_dev *solo_dev, dma_addr_t buf, unsigned int off, unsigned int size) { int ret; if (off > SOLO_MP4E_EXT_SIZE(solo_dev)) return -EINVAL; if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev)) { return solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf, SOLO_MP4E_EXT_ADDR(solo_dev) + off, size); } /* Buffer wrap */ ret = solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf, SOLO_MP4E_EXT_ADDR(solo_dev) + off, SOLO_MP4E_EXT_SIZE(solo_dev) - off); ret |= solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf + SOLO_MP4E_EXT_SIZE(solo_dev) - off, SOLO_MP4E_EXT_ADDR(solo_dev), size + off - SOLO_MP4E_EXT_SIZE(solo_dev)); return ret; } static int enc_get_mpeg_dma(struct solo_dev *solo_dev, void *buf, unsigned int off, unsigned int size) { int ret; dma_addr_t dma_addr = pci_map_single(solo_dev->pdev, buf, size, PCI_DMA_FROMDEVICE); ret = enc_get_mpeg_dma_t(solo_dev, dma_addr, off, size); pci_unmap_single(solo_dev->pdev, dma_addr, size, PCI_DMA_FROMDEVICE); return ret; } static int enc_get_jpeg_dma_sg(struct solo_dev *solo_dev, struct p2m_desc *desc, struct scatterlist *sglist, int skip, unsigned int off, unsigned int size) { int ret; if (off > SOLO_JPEG_EXT_SIZE(solo_dev)) return -EINVAL; if (off + size <= SOLO_JPEG_EXT_SIZE(solo_dev)) { return solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG, desc, 0, sglist, skip, SOLO_JPEG_EXT_ADDR(solo_dev) + off, size); } /* Buffer wrap */ ret = solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG, desc, 0, sglist, skip, SOLO_JPEG_EXT_ADDR(solo_dev) + off, SOLO_JPEG_EXT_SIZE(solo_dev) - off); ret |= solo_p2m_dma_sg(solo_dev, 
SOLO_P2M_DMA_ID_JPEG, desc, 0, sglist, skip + SOLO_JPEG_EXT_SIZE(solo_dev) - off, SOLO_JPEG_EXT_ADDR(solo_dev), size + off - SOLO_JPEG_EXT_SIZE(solo_dev)); return ret; } /* Returns true of __chk is within the first __range bytes of __off */ #define OFF_IN_RANGE(__off, __range, __chk) \ ((__off <= __chk) && ((__off + __range) >= __chk)) static void solo_jpeg_header(struct solo_enc_dev *solo_enc, struct videobuf_dmabuf *vbuf) { struct scatterlist *sg; void *src = jpeg_header; size_t copied = 0; size_t to_copy = sizeof(jpeg_header); for (sg = vbuf->sglist; sg && copied < to_copy; sg = sg_next(sg)) { size_t this_copy = min(sg_dma_len(sg), (unsigned int)(to_copy - copied)); u8 *p = sg_virt(sg); memcpy(p, src + copied, this_copy); if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 5)) p[(SOF0_START + 5) - copied] = 0xff & (solo_enc->height >> 8); if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 6)) p[(SOF0_START + 6) - copied] = 0xff & solo_enc->height; if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 7)) p[(SOF0_START + 7) - copied] = 0xff & (solo_enc->width >> 8); if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 8)) p[(SOF0_START + 8) - copied] = 0xff & solo_enc->width; copied += this_copy; } } static int solo_fill_jpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf, struct videobuf_buffer *vb, struct videobuf_dmabuf *vbuf) { struct solo_dev *solo_dev = fh->enc->solo_dev; int size = enc_buf->jpeg_size; /* Copy the header first (direct write) */ solo_jpeg_header(fh->enc, vbuf); vb->size = size + sizeof(jpeg_header); /* Grab the jpeg frame */ return enc_get_jpeg_dma_sg(solo_dev, fh->desc, vbuf->sglist, sizeof(jpeg_header), enc_buf->jpeg_off, size); } static inline int vop_interlaced(__le32 *vh) { return (__le32_to_cpu(vh[0]) >> 30) & 1; } static inline u32 vop_size(__le32 *vh) { return __le32_to_cpu(vh[0]) & 0xFFFFF; } static inline u8 vop_hsize(__le32 *vh) { return (__le32_to_cpu(vh[1]) >> 8) & 0xFF; } static inline u8 vop_vsize(__le32 *vh) { return 
__le32_to_cpu(vh[1]) & 0xFF; } /* must be called with *bits % 8 = 0 */ static void write_bytes(u8 **out, unsigned *bits, const u8 *src, unsigned count) { memcpy(*out, src, count); *out += count; *bits += count * 8; } static void write_bits(u8 **out, unsigned *bits, u32 value, unsigned count) { value <<= 32 - count; // shift to the right while (count--) { **out <<= 1; **out |= !!(value & (1 << 31)); /* MSB */ value <<= 1; if (++(*bits) % 8 == 0) (*out)++; } } static void write_ue(u8 **out, unsigned *bits, unsigned value) /* H.264 only */ { uint32_t max = 0, cnt = 0; while (value > max) { max = (max + 2) * 2 - 2; cnt++; } write_bits(out, bits, 1, cnt + 1); write_bits(out, bits, ~(max - value), cnt); } static void write_se(u8 **out, unsigned *bits, int value) /* H.264 only */ { if (value <= 0) write_ue(out, bits, -value * 2); else write_ue(out, bits, value * 2 - 1); } static void write_mpeg4_end(u8 **out, unsigned *bits) { write_bits(out, bits, 0, 1); /* align on 32-bit boundary */ if (*bits % 32) write_bits(out, bits, 0xFFFFFFFF, 32 - *bits % 32); } static void write_h264_end(u8 **out, unsigned *bits, int align) { write_bits(out, bits, 1, 1); while ((*bits) % 8) write_bits(out, bits, 0, 1); if (align) while ((*bits) % 32) write_bits(out, bits, 0, 1); } static void mpeg4_write_vol(u8 **out, struct solo_dev *solo_dev, __le32 *vh, unsigned fps, unsigned interval) { static const u8 hdr[] = { 0, 0, 1, 0x00 /* video_object_start_code */, 0, 0, 1, 0x20 /* video_object_layer_start_code */ }; unsigned bits = 0; unsigned width = vop_hsize(vh) << 4; unsigned height = vop_vsize(vh) << 4; unsigned interlaced = vop_interlaced(vh); write_bytes(out, &bits, hdr, sizeof(hdr)); write_bits(out, &bits, 0, 1); /* random_accessible_vol */ write_bits(out, &bits, 0x04, 8); /* video_object_type_indication: main */ write_bits(out, &bits, 1, 1); /* is_object_layer_identifier */ write_bits(out, &bits, 2, 4); /* video_object_layer_verid: table V2-39 */ write_bits(out, &bits, 0, 3); /* 
video_object_layer_priority */ if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) write_bits(out, &bits, 3, 4); /* aspect_ratio_info, assuming 4:3 */ else write_bits(out, &bits, 2, 4); write_bits(out, &bits, 1, 1); /* vol_control_parameters */ write_bits(out, &bits, 1, 2); /* chroma_format: 4:2:0 */ write_bits(out, &bits, 1, 1); /* low_delay */ write_bits(out, &bits, 0, 1); /* vbv_parameters */ write_bits(out, &bits, 0, 2); /* video_object_layer_shape: rectangular */ write_bits(out, &bits, 1, 1); /* marker_bit */ write_bits(out, &bits, fps, 16); /* vop_time_increment_resolution */ write_bits(out, &bits, 1, 1); /* marker_bit */ write_bits(out, &bits, 1, 1); /* fixed_vop_rate */ write_bits(out, &bits, interval, 15); /* fixed_vop_time_increment */ write_bits(out, &bits, 1, 1); /* marker_bit */ write_bits(out, &bits, width, 13); /* video_object_layer_width */ write_bits(out, &bits, 1, 1); /* marker_bit */ write_bits(out, &bits, height, 13); /* video_object_layer_height */ write_bits(out, &bits, 1, 1); /* marker_bit */ write_bits(out, &bits, interlaced, 1); /* interlaced */ write_bits(out, &bits, 1, 1); /* obmc_disable */ write_bits(out, &bits, 0, 2); /* sprite_enable */ write_bits(out, &bits, 0, 1); /* not_8_bit */ write_bits(out, &bits, 1, 0); /* quant_type */ write_bits(out, &bits, 0, 1); /* load_intra_quant_mat */ write_bits(out, &bits, 0, 1); /* load_nonintra_quant_mat */ write_bits(out, &bits, 0, 1); /* quarter_sample */ write_bits(out, &bits, 1, 1); /* complexity_estimation_disable */ write_bits(out, &bits, 1, 1); /* resync_marker_disable */ write_bits(out, &bits, 0, 1); /* data_partitioned */ write_bits(out, &bits, 0, 1); /* newpred_enable */ write_bits(out, &bits, 0, 1); /* reduced_resolution_vop_enable */ write_bits(out, &bits, 0, 1); /* scalability */ write_mpeg4_end(out, &bits); } static void h264_write_vol(u8 **out, struct solo_dev *solo_dev, __le32 *vh) { static const u8 sps[] = { 0, 0, 0, 1 /* start code */, 0x67, 66 /* profile_idc */, 0 /* constraints 
*/, 30 /* level_idc */ }; static const u8 pps[] = { 0, 0, 0, 1 /* start code */, 0x68 }; unsigned bits = 0; unsigned mbs_w = vop_hsize(vh); unsigned mbs_h = vop_vsize(vh); write_bytes(out, &bits, sps, sizeof(sps)); write_ue(out, &bits, 0); /* seq_parameter_set_id */ write_ue(out, &bits, 5); /* log2_max_frame_num_minus4 */ write_ue(out, &bits, 0); /* pic_order_cnt_type */ write_ue(out, &bits, 6); /* log2_max_pic_order_cnt_lsb_minus4 */ write_ue(out, &bits, 1); /* max_num_ref_frames */ write_bits(out, &bits, 0, 1); /* gaps_in_frame_num_value_allowed_flag */ write_ue(out, &bits, mbs_w - 1); /* pic_width_in_mbs_minus1 */ write_ue(out, &bits, mbs_h - 1); /* pic_height_in_map_units_minus1 */ write_bits(out, &bits, 1, 1); /* frame_mbs_only_flag */ write_bits(out, &bits, 1, 1); /* direct_8x8_frame_field_flag */ write_bits(out, &bits, 0, 1); /* frame_cropping_flag */ write_bits(out, &bits, 0, 1); /* vui_parameters_present_flag */ write_h264_end(out, &bits, 0); write_bytes(out, &bits, pps, sizeof(pps)); write_ue(out, &bits, 0); /* pic_parameter_set_id */ write_ue(out, &bits, 0); /* seq_parameter_set_id */ write_bits(out, &bits, 0, 1); /* entropy_coding_mode_flag */ write_bits(out, &bits, 0, 1); /* bottom_field_pic_order_in_frame_present_flag */ write_ue(out, &bits, 0); /* num_slice_groups_minus1 */ write_ue(out, &bits, 0); /* num_ref_idx_l0_default_active_minus1 */ write_ue(out, &bits, 0); /* num_ref_idx_l1_default_active_minus1 */ write_bits(out, &bits, 0, 1); /* weighted_pred_flag */ write_bits(out, &bits, 0, 2); /* weighted_bipred_idc */ write_se(out, &bits, 0); /* pic_init_qp_minus26 */ write_se(out, &bits, 0); /* pic_init_qs_minus26 */ write_se(out, &bits, 2); /* chroma_qp_index_offset */ write_bits(out, &bits, 0, 1); /* deblocking_filter_control_present_flag */ write_bits(out, &bits, 1, 1); /* constrained_intra_pred_flag */ write_bits(out, &bits, 0, 1); /* redundant_pic_cnt_present_flag */ write_h264_end(out, &bits, 1); } static int solo_fill_mpeg(struct solo_enc_fh 
*fh, struct solo_enc_buf *enc_buf, struct videobuf_buffer *vb, struct videobuf_dmabuf *vbuf) { struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; #define VH_WORDS 16 #define MAX_VOL_HEADER_LENGTH 64 __le32 vh[VH_WORDS]; int ret; int frame_size, frame_off; int skip = 0; if (WARN_ON_ONCE(enc_buf->size <= sizeof(vh))) return -EINVAL; /* First get the hardware vop header (not real mpeg) */ ret = enc_get_mpeg_dma(solo_dev, vh, enc_buf->off, sizeof(vh)); if (WARN_ON_ONCE(ret)) return ret; if (WARN_ON_ONCE(vop_size(vh) > enc_buf->size)) return -EINVAL; vb->width = vop_hsize(vh) << 4; vb->height = vop_vsize(vh) << 4; vb->size = vop_size(vh); /* If this is a key frame, add extra m4v header */ if (!enc_buf->vop) { u8 header[MAX_VOL_HEADER_LENGTH], *out = header; if (solo_dev->flags & FLAGS_6110) h264_write_vol(&out, solo_dev, vh); else mpeg4_write_vol(&out, solo_dev, vh, solo_dev->fps * 1000, solo_enc->interval * 1000); skip = out - header; enc_write_sg(vbuf->sglist, header, skip); /* Adjust the dma buffer past this header */ vb->size += skip; } /* Now get the actual mpeg payload */ frame_off = (enc_buf->off + sizeof(vh)) % SOLO_MP4E_EXT_SIZE(solo_dev); frame_size = enc_buf->size - sizeof(vh); ret = enc_get_mpeg_dma_sg(solo_dev, fh->desc, vbuf->sglist, skip, frame_off, frame_size); WARN_ON_ONCE(ret); return ret; } static void solo_enc_fillbuf(struct solo_enc_fh *fh, struct videobuf_buffer *vb) { struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; struct solo_enc_buf *enc_buf = NULL; struct videobuf_dmabuf *vbuf; int ret; int error = 1; u16 idx = fh->rd_idx; while (idx != solo_dev->enc_wr_idx) { struct solo_enc_buf *ebuf = &solo_dev->enc_buf[idx]; idx = (idx + 1) % SOLO_NR_RING_BUFS; if (ebuf->ch != solo_enc->ch) continue; if (fh->fmt == V4L2_PIX_FMT_MPEG) { if (fh->type == ebuf->type) { enc_buf = ebuf; break; } } else { /* For mjpeg, keep reading to the newest frame */ enc_buf = ebuf; } } fh->rd_idx = 
idx; if (WARN_ON_ONCE(!enc_buf)) goto buf_err; if ((fh->fmt == V4L2_PIX_FMT_MPEG && vb->bsize < enc_buf->size) || (fh->fmt == V4L2_PIX_FMT_MJPEG && vb->bsize < (enc_buf->jpeg_size + sizeof(jpeg_header)))) { WARN_ON_ONCE(1); goto buf_err; } vbuf = videobuf_to_dma(vb); if (WARN_ON_ONCE(!vbuf)) goto buf_err; if (fh->fmt == V4L2_PIX_FMT_MPEG) ret = solo_fill_mpeg(fh, enc_buf, vb, vbuf); else ret = solo_fill_jpeg(fh, enc_buf, vb, vbuf); if (!ret) error = 0; buf_err: if (error) { vb->state = VIDEOBUF_ERROR; } else { vb->field_count++; vb->ts = enc_buf->ts; vb->state = VIDEOBUF_DONE; } wake_up(&vb->done); return; } static void solo_enc_thread_try(struct solo_enc_fh *fh) { struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; struct videobuf_buffer *vb; for (;;) { spin_lock(&solo_enc->lock); if (fh->rd_idx == solo_dev->enc_wr_idx) break; if (list_empty(&fh->vidq_active)) break; vb = list_first_entry(&fh->vidq_active, struct videobuf_buffer, queue); if (!waitqueue_active(&vb->done)) break; list_del(&vb->queue); spin_unlock(&solo_enc->lock); solo_enc_fillbuf(fh, vb); } assert_spin_locked(&solo_enc->lock); spin_unlock(&solo_enc->lock); } static int solo_enc_thread(void *data) { struct solo_enc_fh *fh = data; struct solo_enc_dev *solo_enc = fh->enc; DECLARE_WAITQUEUE(wait, current); set_freezable(); add_wait_queue(&solo_enc->thread_wait, &wait); for (;;) { long timeout = schedule_timeout_interruptible(HZ); if (timeout == -ERESTARTSYS || kthread_should_stop()) break; solo_enc_thread_try(fh); try_to_freeze(); } remove_wait_queue(&solo_enc->thread_wait, &wait); return 0; } void solo_motion_isr(struct solo_dev *solo_dev) { u32 status; int i; solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_MOTION); status = solo_reg_read(solo_dev, SOLO_VI_MOT_STATUS); for (i = 0; i < solo_dev->nr_chans; i++) { struct solo_enc_dev *solo_enc = solo_dev->v4l2_enc[i]; BUG_ON(solo_enc == NULL); if (solo_enc->motion_detected) continue; if (!(status & (1 << i))) 
continue; solo_enc->motion_detected = 1; } } void solo_enc_v4l2_isr(struct solo_dev *solo_dev) { struct solo_enc_buf *enc_buf; u32 mpeg_current, mpeg_next, mpeg_size; u32 jpeg_current, jpeg_next, jpeg_size; u32 reg_mpeg_size; u8 cur_q, vop_type; u8 ch; enum solo_enc_types enc_type; solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_ENCODER); cur_q = ((solo_reg_read(solo_dev, SOLO_VE_STATE(11)) & 0xF) + 1) % MP4_QS; reg_mpeg_size = ((solo_reg_read(solo_dev, SOLO_VE_STATE(0)) & 0xFFFFF) + 64 + 8) & ~7; while (solo_dev->enc_idx != cur_q) { mpeg_current = solo_reg_read(solo_dev, SOLO_VE_MPEG4_QUE(solo_dev->enc_idx)); jpeg_current = solo_reg_read(solo_dev, SOLO_VE_JPEG_QUE(solo_dev->enc_idx)); solo_dev->enc_idx = (solo_dev->enc_idx + 1) % MP4_QS; mpeg_next = solo_reg_read(solo_dev, SOLO_VE_MPEG4_QUE(solo_dev->enc_idx)); jpeg_next = solo_reg_read(solo_dev, SOLO_VE_JPEG_QUE(solo_dev->enc_idx)); ch = (mpeg_current >> 24) & 0x1f; if (ch >= SOLO_MAX_CHANNELS) { ch -= SOLO_MAX_CHANNELS; enc_type = SOLO_ENC_TYPE_EXT; } else enc_type = SOLO_ENC_TYPE_STD; vop_type = (mpeg_current >> 29) & 3; mpeg_current &= 0x00ffffff; mpeg_next &= 0x00ffffff; jpeg_current &= 0x00ffffff; jpeg_next &= 0x00ffffff; mpeg_size = (SOLO_MP4E_EXT_SIZE(solo_dev) + mpeg_next - mpeg_current) % SOLO_MP4E_EXT_SIZE(solo_dev); jpeg_size = (SOLO_JPEG_EXT_SIZE(solo_dev) + jpeg_next - jpeg_current) % SOLO_JPEG_EXT_SIZE(solo_dev); /* XXX I think this means we had a ring overflow? 
*/ if (mpeg_current > mpeg_next && mpeg_size != reg_mpeg_size) { enc_reset_gop(solo_dev, ch); continue; } /* When resetting the GOP, skip frames until I-frame */ if (enc_gop_reset(solo_dev, ch, vop_type)) continue; enc_buf = &solo_dev->enc_buf[solo_dev->enc_wr_idx]; enc_buf->vop = vop_type; enc_buf->ch = ch; enc_buf->off = mpeg_current; enc_buf->size = mpeg_size; enc_buf->jpeg_off = jpeg_current; enc_buf->jpeg_size = jpeg_size; enc_buf->type = enc_type; do_gettimeofday(&enc_buf->ts); solo_dev->enc_wr_idx = (solo_dev->enc_wr_idx + 1) % SOLO_NR_RING_BUFS; wake_up_interruptible(&solo_dev->v4l2_enc[ch]->thread_wait); } return; } static int solo_enc_buf_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { *size = FRAME_BUF_SIZE; if (*count < MIN_VID_BUFFERS) *count = MIN_VID_BUFFERS; return 0; } static int solo_enc_buf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct solo_enc_fh *fh = vq->priv_data; struct solo_enc_dev *solo_enc = fh->enc; vb->size = FRAME_BUF_SIZE; if (vb->baddr != 0 && vb->bsize < vb->size) return -EINVAL; /* These properties only change when queue is idle */ vb->width = solo_enc->width; vb->height = solo_enc->height; vb->field = field; if (vb->state == VIDEOBUF_NEEDS_INIT) { int rc = videobuf_iolock(vq, vb, NULL); if (rc < 0) { struct videobuf_dmabuf *dma = videobuf_to_dma(vb); videobuf_dma_unmap(vq->dev, dma); videobuf_dma_free(dma); vb->state = VIDEOBUF_NEEDS_INIT; return rc; } } vb->state = VIDEOBUF_PREPARED; return 0; } static void solo_enc_buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct solo_enc_fh *fh = vq->priv_data; vb->state = VIDEOBUF_QUEUED; list_add_tail(&vb->queue, &fh->vidq_active); wake_up_interruptible(&fh->enc->thread_wait); } static void solo_enc_buf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct videobuf_dmabuf *dma = videobuf_to_dma(vb); videobuf_dma_unmap(vq->dev, dma); videobuf_dma_free(dma); vb->state = 
VIDEOBUF_NEEDS_INIT; } static struct videobuf_queue_ops solo_enc_video_qops = { .buf_setup = solo_enc_buf_setup, .buf_prepare = solo_enc_buf_prepare, .buf_queue = solo_enc_buf_queue, .buf_release = solo_enc_buf_release, }; static unsigned int solo_enc_poll(struct file *file, struct poll_table_struct *wait) { struct solo_enc_fh *fh = file->private_data; return videobuf_poll_stream(file, &fh->vidq, wait); } static int solo_enc_mmap(struct file *file, struct vm_area_struct *vma) { struct solo_enc_fh *fh = file->private_data; return videobuf_mmap_mapper(&fh->vidq, vma); } static int solo_enc_open(struct file *file) { struct solo_enc_dev *solo_enc = video_drvdata(file); struct solo_enc_fh *fh; fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (fh == NULL) return -ENOMEM; fh->enc = solo_enc; file->private_data = fh; INIT_LIST_HEAD(&fh->vidq_active); fh->fmt = V4L2_PIX_FMT_MPEG; fh->type = SOLO_ENC_TYPE_STD; videobuf_queue_sg_init(&fh->vidq, &solo_enc_video_qops, &solo_enc->solo_dev->pdev->dev, &solo_enc->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct videobuf_buffer), fh, NULL); return 0; } static ssize_t solo_enc_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct solo_enc_fh *fh = file->private_data; struct solo_enc_dev *solo_enc = fh->enc; /* Make sure the encoder is on */ if (!fh->enc_on) { int ret; spin_lock(&solo_enc->lock); ret = solo_enc_on(fh); spin_unlock(&solo_enc->lock); if (ret) return ret; ret = solo_start_fh_thread(fh); if (ret) return ret; } return videobuf_read_stream(&fh->vidq, data, count, ppos, 0, file->f_flags & O_NONBLOCK); } static int solo_enc_release(struct file *file) { struct solo_enc_fh *fh = file->private_data; struct solo_enc_dev *solo_enc = fh->enc; videobuf_stop(&fh->vidq); videobuf_mmap_free(&fh->vidq); spin_lock(&solo_enc->lock); solo_enc_off(fh); spin_unlock(&solo_enc->lock); kfree(fh); return 0; } static int solo_enc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { 
struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; strcpy(cap->driver, SOLO6X10_NAME); snprintf(cap->card, sizeof(cap->card), "Softlogic 6x10 Enc %d", solo_enc->ch); snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI %s", pci_name(solo_dev->pdev)); cap->version = SOLO6X10_VER_NUM; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; return 0; } static int solo_enc_enum_input(struct file *file, void *priv, struct v4l2_input *input) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; if (input->index) return -EINVAL; snprintf(input->name, sizeof(input->name), "Encoder %d", solo_enc->ch + 1); input->type = V4L2_INPUT_TYPE_CAMERA; if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) input->std = V4L2_STD_NTSC_M; else input->std = V4L2_STD_PAL_B; if (!tw28_get_video_status(solo_dev, solo_enc->ch)) input->status = V4L2_IN_ST_NO_SIGNAL; return 0; } static int solo_enc_set_input(struct file *file, void *priv, unsigned int index) { if (index) return -EINVAL; return 0; } static int solo_enc_get_input(struct file *file, void *priv, unsigned int *index) { *index = 0; return 0; } static int solo_enc_enum_fmt_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { switch (f->index) { case 0: f->pixelformat = V4L2_PIX_FMT_MPEG; strcpy(f->description, "MPEG-4 AVC"); break; case 1: f->pixelformat = V4L2_PIX_FMT_MJPEG; strcpy(f->description, "MJPEG"); break; default: return -EINVAL; } f->flags = V4L2_FMT_FLAG_COMPRESSED; return 0; } static int solo_enc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; struct v4l2_pix_format *pix = &f->fmt.pix; if (pix->pixelformat != V4L2_PIX_FMT_MPEG && pix->pixelformat != V4L2_PIX_FMT_MJPEG) return -EINVAL; /* We cannot change width/height 
in mid read */ if (atomic_read(&solo_enc->readers) > 0) { if (pix->width != solo_enc->width || pix->height != solo_enc->height) return -EBUSY; } if (pix->width < solo_dev->video_hsize || pix->height < solo_dev->video_vsize << 1) { /* Default to CIF 1/2 size */ pix->width = solo_dev->video_hsize >> 1; pix->height = solo_dev->video_vsize; } else { /* Full frame */ pix->width = solo_dev->video_hsize; pix->height = solo_dev->video_vsize << 1; } if (pix->field == V4L2_FIELD_ANY) pix->field = V4L2_FIELD_INTERLACED; else if (pix->field != V4L2_FIELD_INTERLACED) pix->field = V4L2_FIELD_INTERLACED; /* Just set these */ pix->colorspace = V4L2_COLORSPACE_SMPTE170M; pix->sizeimage = FRAME_BUF_SIZE; return 0; } static int solo_enc_set_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; struct v4l2_pix_format *pix = &f->fmt.pix; int ret; spin_lock(&solo_enc->lock); ret = solo_enc_try_fmt_cap(file, priv, f); if (ret) { spin_unlock(&solo_enc->lock); return ret; } if (pix->width == solo_dev->video_hsize) solo_enc->mode = SOLO_ENC_MODE_D1; else solo_enc->mode = SOLO_ENC_MODE_CIF; /* This does not change the encoder at all */ fh->fmt = pix->pixelformat; if (pix->priv) fh->type = SOLO_ENC_TYPE_EXT; ret = solo_enc_on(fh); spin_unlock(&solo_enc->lock); if (ret) return ret; return solo_start_fh_thread(fh); } static int solo_enc_get_fmt_cap(struct file *file, void *priv, struct v4l2_format *f) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct v4l2_pix_format *pix = &f->fmt.pix; pix->width = solo_enc->width; pix->height = solo_enc->height; pix->pixelformat = fh->fmt; pix->field = solo_enc->interlaced ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE; pix->sizeimage = FRAME_BUF_SIZE; pix->colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int solo_enc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *req) { struct solo_enc_fh *fh = priv; return videobuf_reqbufs(&fh->vidq, req); } static int solo_enc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct solo_enc_fh *fh = priv; return videobuf_querybuf(&fh->vidq, buf); } static int solo_enc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct solo_enc_fh *fh = priv; return videobuf_qbuf(&fh->vidq, buf); } static int solo_enc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; int ret; /* Make sure the encoder is on */ if (!fh->enc_on) { spin_lock(&solo_enc->lock); ret = solo_enc_on(fh); spin_unlock(&solo_enc->lock); if (ret) return ret; ret = solo_start_fh_thread(fh); if (ret) return ret; } ret = videobuf_dqbuf(&fh->vidq, buf, file->f_flags & O_NONBLOCK); if (ret) return ret; /* Signal motion detection */ if (solo_is_motion_on(solo_enc)) { buf->flags |= V4L2_BUF_FLAG_MOTION_ON; if (solo_enc->motion_detected) { buf->flags |= V4L2_BUF_FLAG_MOTION_DETECTED; solo_reg_write(solo_enc->solo_dev, SOLO_VI_MOT_CLEAR, 1 << solo_enc->ch); solo_enc->motion_detected = 0; } } /* Check for key frame on mpeg data */ if (fh->fmt == V4L2_PIX_FMT_MPEG) { struct videobuf_dmabuf *vbuf = videobuf_to_dma(fh->vidq.bufs[buf->index]); if (vbuf) { u8 *p = sg_virt(vbuf->sglist); if (p[3] == 0x00) buf->flags |= V4L2_BUF_FLAG_KEYFRAME; else buf->flags |= V4L2_BUF_FLAG_PFRAME; } } return 0; } static int solo_enc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct solo_enc_fh *fh = priv; if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; return videobuf_streamon(&fh->vidq); } static int solo_enc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct solo_enc_fh *fh = priv; 
if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; return videobuf_streamoff(&fh->vidq); } static int solo_enc_s_std(struct file *file, void *priv, v4l2_std_id *i) { return 0; } static int solo_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { struct solo_enc_fh *fh = priv; struct solo_dev *solo_dev = fh->enc->solo_dev; if (fsize->pixel_format != V4L2_PIX_FMT_MPEG) return -EINVAL; switch (fsize->index) { case 0: fsize->discrete.width = solo_dev->video_hsize >> 1; fsize->discrete.height = solo_dev->video_vsize; break; case 1: fsize->discrete.width = solo_dev->video_hsize; fsize->discrete.height = solo_dev->video_vsize << 1; break; default: return -EINVAL; } fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; return 0; } static int solo_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fintv) { struct solo_enc_fh *fh = priv; struct solo_dev *solo_dev = fh->enc->solo_dev; if (fintv->pixel_format != V4L2_PIX_FMT_MPEG || fintv->index) return -EINVAL; fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE; fintv->stepwise.min.numerator = solo_dev->fps; fintv->stepwise.min.denominator = 1; fintv->stepwise.max.numerator = solo_dev->fps; fintv->stepwise.max.denominator = 15; fintv->stepwise.step.numerator = 1; fintv->stepwise.step.denominator = 1; return 0; } static int solo_g_parm(struct file *file, void *priv, struct v4l2_streamparm *sp) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; struct v4l2_captureparm *cp = &sp->parm.capture; cp->capability = V4L2_CAP_TIMEPERFRAME; cp->timeperframe.numerator = solo_enc->interval; cp->timeperframe.denominator = solo_dev->fps; cp->capturemode = 0; /* XXX: Shouldn't we be able to get/set this from videobuf? 
*/ cp->readbuffers = 2; return 0; } static int solo_s_parm(struct file *file, void *priv, struct v4l2_streamparm *sp) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; struct v4l2_captureparm *cp = &sp->parm.capture; spin_lock(&solo_enc->lock); if (atomic_read(&solo_enc->readers) > 0) { spin_unlock(&solo_enc->lock); return -EBUSY; } if ((cp->timeperframe.numerator == 0) || (cp->timeperframe.denominator == 0)) { /* reset framerate */ cp->timeperframe.numerator = 1; cp->timeperframe.denominator = solo_dev->fps; } if (cp->timeperframe.denominator != solo_dev->fps) cp->timeperframe.denominator = solo_dev->fps; if (cp->timeperframe.numerator > 15) cp->timeperframe.numerator = 15; solo_enc->interval = cp->timeperframe.numerator; cp->capability = V4L2_CAP_TIMEPERFRAME; solo_enc->gop = max(solo_dev->fps / solo_enc->interval, 1); solo_update_mode(solo_enc); spin_unlock(&solo_enc->lock); return 0; } static int solo_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; qc->id = v4l2_ctrl_next(solo_ctrl_classes, qc->id); if (!qc->id) return -EINVAL; switch (qc->id) { case V4L2_CID_BRIGHTNESS: case V4L2_CID_CONTRAST: case V4L2_CID_SATURATION: case V4L2_CID_HUE: return v4l2_ctrl_query_fill(qc, 0x00, 0xff, 1, 0x80); case V4L2_CID_SHARPNESS: return v4l2_ctrl_query_fill(qc, 0x00, 0x0f, 1, 0x00); case V4L2_CID_MPEG_VIDEO_ENCODING: return v4l2_ctrl_query_fill( qc, V4L2_MPEG_VIDEO_ENCODING_MPEG_1, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 1, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC); case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return v4l2_ctrl_query_fill(qc, 1, 255, 1, solo_dev->fps); #ifdef PRIVATE_CIDS case V4L2_CID_MOTION_THRESHOLD: qc->flags |= V4L2_CTRL_FLAG_SLIDER; qc->type = V4L2_CTRL_TYPE_INTEGER; qc->minimum = 0; qc->maximum = 0xffff; qc->step = 1; qc->default_value = SOLO_DEF_MOT_THRESH; 
strlcpy(qc->name, "Motion Detection Threshold", sizeof(qc->name)); return 0; case V4L2_CID_MOTION_ENABLE: qc->type = V4L2_CTRL_TYPE_BOOLEAN; qc->minimum = 0; qc->maximum = qc->step = 1; qc->default_value = 0; strlcpy(qc->name, "Motion Detection Enable", sizeof(qc->name)); return 0; #else case V4L2_CID_MOTION_THRESHOLD: return v4l2_ctrl_query_fill(qc, 0, 0xffff, 1, SOLO_DEF_MOT_THRESH); case V4L2_CID_MOTION_ENABLE: return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0); #endif case V4L2_CID_RDS_TX_RADIO_TEXT: qc->type = V4L2_CTRL_TYPE_STRING; qc->minimum = 0; qc->maximum = OSD_TEXT_MAX; qc->step = 1; qc->default_value = 0; strlcpy(qc->name, "OSD Text", sizeof(qc->name)); return 0; } return -EINVAL; } static int solo_querymenu(struct file *file, void *priv, struct v4l2_querymenu *qmenu) { struct v4l2_queryctrl qctrl; int err; qctrl.id = qmenu->id; err = solo_queryctrl(file, priv, &qctrl); if (err) return err; return v4l2_ctrl_query_menu(qmenu, &qctrl, NULL); } static int solo_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: case V4L2_CID_CONTRAST: case V4L2_CID_SATURATION: case V4L2_CID_HUE: case V4L2_CID_SHARPNESS: return tw28_get_ctrl_val(solo_dev, ctrl->id, solo_enc->ch, &ctrl->value); case V4L2_CID_MPEG_VIDEO_ENCODING: ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC; break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: ctrl->value = solo_enc->gop; break; case V4L2_CID_MOTION_THRESHOLD: ctrl->value = solo_enc->motion_thresh; break; case V4L2_CID_MOTION_ENABLE: ctrl->value = solo_is_motion_on(solo_enc); break; default: return -EINVAL; } return 0; } static int solo_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; struct solo_dev *solo_dev = solo_enc->solo_dev; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: case 
V4L2_CID_CONTRAST: case V4L2_CID_SATURATION: case V4L2_CID_HUE: case V4L2_CID_SHARPNESS: return tw28_set_ctrl_val(solo_dev, ctrl->id, solo_enc->ch, ctrl->value); case V4L2_CID_MPEG_VIDEO_ENCODING: if (ctrl->value != V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC) return -ERANGE; break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: if (ctrl->value < 1 || ctrl->value > 255) return -ERANGE; solo_enc->gop = ctrl->value; solo_reg_write(solo_dev, SOLO_VE_CH_GOP(solo_enc->ch), solo_enc->gop); solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(solo_enc->ch), solo_enc->gop); break; case V4L2_CID_MOTION_THRESHOLD: /* TODO accept value on lower 16-bits and use high * 16-bits to assign the value to a specific block */ if (ctrl->value < 0 || ctrl->value > 0xffff) return -ERANGE; solo_enc->motion_thresh = ctrl->value; solo_set_motion_threshold(solo_dev, solo_enc->ch, ctrl->value); break; case V4L2_CID_MOTION_ENABLE: solo_motion_toggle(solo_enc, ctrl->value); break; default: return -EINVAL; } return 0; } static int solo_s_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *ctrls) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; int i; for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = (ctrls->controls + i); int err; switch (ctrl->id) { case V4L2_CID_RDS_TX_RADIO_TEXT: if (ctrl->size - 1 > OSD_TEXT_MAX) err = -ERANGE; else { err = copy_from_user(solo_enc->osd_text, ctrl->string, OSD_TEXT_MAX); solo_enc->osd_text[OSD_TEXT_MAX] = '\0'; if (!err) err = solo_osd_print(solo_enc); } break; default: err = -EINVAL; } if (err < 0) { ctrls->error_idx = i; return err; } } return 0; } static int solo_g_ext_ctrls(struct file *file, void *priv, struct v4l2_ext_controls *ctrls) { struct solo_enc_fh *fh = priv; struct solo_enc_dev *solo_enc = fh->enc; int i; for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = (ctrls->controls + i); int err; switch (ctrl->id) { case V4L2_CID_RDS_TX_RADIO_TEXT: if (ctrl->size < OSD_TEXT_MAX) { ctrl->size = OSD_TEXT_MAX; 
err = -ENOSPC; } else { err = copy_to_user(ctrl->string, solo_enc->osd_text, OSD_TEXT_MAX); } break; default: err = -EINVAL; } if (err < 0) { ctrls->error_idx = i; return err; } } return 0; } static const struct v4l2_file_operations solo_enc_fops = { .owner = THIS_MODULE, .open = solo_enc_open, .release = solo_enc_release, .read = solo_enc_read, .poll = solo_enc_poll, .mmap = solo_enc_mmap, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops solo_enc_ioctl_ops = { .vidioc_querycap = solo_enc_querycap, .vidioc_s_std = solo_enc_s_std, /* Input callbacks */ .vidioc_enum_input = solo_enc_enum_input, .vidioc_s_input = solo_enc_set_input, .vidioc_g_input = solo_enc_get_input, /* Video capture format callbacks */ .vidioc_enum_fmt_vid_cap = solo_enc_enum_fmt_cap, .vidioc_try_fmt_vid_cap = solo_enc_try_fmt_cap, .vidioc_s_fmt_vid_cap = solo_enc_set_fmt_cap, .vidioc_g_fmt_vid_cap = solo_enc_get_fmt_cap, /* Streaming I/O */ .vidioc_reqbufs = solo_enc_reqbufs, .vidioc_querybuf = solo_enc_querybuf, .vidioc_qbuf = solo_enc_qbuf, .vidioc_dqbuf = solo_enc_dqbuf, .vidioc_streamon = solo_enc_streamon, .vidioc_streamoff = solo_enc_streamoff, /* Frame size and interval */ .vidioc_enum_framesizes = solo_enum_framesizes, .vidioc_enum_frameintervals = solo_enum_frameintervals, /* Video capture parameters */ .vidioc_s_parm = solo_s_parm, .vidioc_g_parm = solo_g_parm, /* Controls */ .vidioc_queryctrl = solo_queryctrl, .vidioc_querymenu = solo_querymenu, .vidioc_g_ctrl = solo_g_ctrl, .vidioc_s_ctrl = solo_s_ctrl, .vidioc_g_ext_ctrls = solo_g_ext_ctrls, .vidioc_s_ext_ctrls = solo_s_ext_ctrls, }; static struct video_device solo_enc_template = { .name = SOLO6X10_NAME, .fops = &solo_enc_fops, .ioctl_ops = &solo_enc_ioctl_ops, .minor = -1, .release = video_device_release, .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_B, .current_norm = V4L2_STD_NTSC_M, }; static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev, u8 ch) { struct solo_enc_dev *solo_enc; int ret; solo_enc = 
kzalloc(sizeof(*solo_enc), GFP_KERNEL); if (!solo_enc) return ERR_PTR(-ENOMEM); solo_enc->vfd = video_device_alloc(); if (!solo_enc->vfd) { kfree(solo_enc); return ERR_PTR(-ENOMEM); } solo_enc->solo_dev = solo_dev; solo_enc->ch = ch; *solo_enc->vfd = solo_enc_template; solo_enc->vfd->parent = &solo_dev->pdev->dev; ret = video_register_device(solo_enc->vfd, VFL_TYPE_GRABBER, video_nr); if (ret < 0) { video_device_release(solo_enc->vfd); kfree(solo_enc); return ERR_PTR(ret); } video_set_drvdata(solo_enc->vfd, solo_enc); snprintf(solo_enc->vfd->name, sizeof(solo_enc->vfd->name), "%s-enc (%i/%i)", SOLO6X10_NAME, solo_dev->vfd->num, solo_enc->vfd->num); if (video_nr != -1) video_nr++; spin_lock_init(&solo_enc->lock); init_waitqueue_head(&solo_enc->thread_wait); atomic_set(&solo_enc->readers, 0); solo_enc->qp = SOLO_DEFAULT_QP; solo_enc->gop = solo_dev->fps; solo_enc->interval = 1; solo_enc->mode = SOLO_ENC_MODE_CIF; solo_enc->motion_thresh = SOLO_DEF_MOT_THRESH; spin_lock(&solo_enc->lock); solo_update_mode(solo_enc); spin_unlock(&solo_enc->lock); return solo_enc; } static void solo_enc_free(struct solo_enc_dev *solo_enc) { if (solo_enc == NULL) return; video_unregister_device(solo_enc->vfd); kfree(solo_enc); } int solo_enc_v4l2_init(struct solo_dev *solo_dev) { int i; for (i = 0; i < solo_dev->nr_chans; i++) { solo_dev->v4l2_enc[i] = solo_enc_alloc(solo_dev, i); if (IS_ERR(solo_dev->v4l2_enc[i])) break; } if (i != solo_dev->nr_chans) { int ret = PTR_ERR(solo_dev->v4l2_enc[i]); while (i--) solo_enc_free(solo_dev->v4l2_enc[i]); return ret; } /* D1@MAX-FPS * 4 */ solo_dev->enc_bw_remain = solo_dev->fps * 4 * 4; dev_info(&solo_dev->pdev->dev, "Encoders as /dev/video%d-%d\n", solo_dev->v4l2_enc[0]->vfd->num, solo_dev->v4l2_enc[solo_dev->nr_chans - 1]->vfd->num); return 0; } void solo_enc_v4l2_exit(struct solo_dev *solo_dev) { int i; solo_irq_off(solo_dev, SOLO_IRQ_MOTION); for (i = 0; i < solo_dev->nr_chans; i++) solo_enc_free(solo_dev->v4l2_enc[i]); }
gpl-2.0
speedbot/android_kernel_sony_Nicki
crypto/blowfish_generic.c
5177
3516
/* * Cryptographic API. * * Blowfish Cipher Algorithm, by Bruce Schneier. * http://www.counterpane.com/blowfish.html * * Adapted from Kerneli implementation. * * Copyright (c) Herbert Valerio Riedel <hvr@hvrlab.org> * Copyright (c) Kyle McMartin <kyle@debian.org> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #include <crypto/blowfish.h> /* * Round loop unrolling macros, S is a pointer to a S-Box array * organized in 4 unsigned longs at a row. */ #define GET32_3(x) (((x) & 0xff)) #define GET32_2(x) (((x) >> (8)) & (0xff)) #define GET32_1(x) (((x) >> (16)) & (0xff)) #define GET32_0(x) (((x) >> (24)) & (0xff)) #define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \ S[512 + GET32_2(x)]) + S[768 + GET32_3(x)]) #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *in_blk = (const __be32 *)src; __be32 *const out_blk = (__be32 *)dst; const u32 *P = ctx->p; const u32 *S = ctx->s; u32 yl = be32_to_cpu(in_blk[0]); u32 yr = be32_to_cpu(in_blk[1]); ROUND(yr, yl, 0); ROUND(yl, yr, 1); ROUND(yr, yl, 2); ROUND(yl, yr, 3); ROUND(yr, yl, 4); ROUND(yl, yr, 5); ROUND(yr, yl, 6); ROUND(yl, yr, 7); ROUND(yr, yl, 8); ROUND(yl, yr, 9); ROUND(yr, yl, 10); ROUND(yl, yr, 11); ROUND(yr, yl, 12); ROUND(yl, yr, 13); ROUND(yr, yl, 14); ROUND(yl, yr, 15); yl ^= P[16]; yr ^= P[17]; out_blk[0] = cpu_to_be32(yr); out_blk[1] = cpu_to_be32(yl); } static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); 
const __be32 *in_blk = (const __be32 *)src; __be32 *const out_blk = (__be32 *)dst; const u32 *P = ctx->p; const u32 *S = ctx->s; u32 yl = be32_to_cpu(in_blk[0]); u32 yr = be32_to_cpu(in_blk[1]); ROUND(yr, yl, 17); ROUND(yl, yr, 16); ROUND(yr, yl, 15); ROUND(yl, yr, 14); ROUND(yr, yl, 13); ROUND(yl, yr, 12); ROUND(yr, yl, 11); ROUND(yl, yr, 10); ROUND(yr, yl, 9); ROUND(yl, yr, 8); ROUND(yr, yl, 7); ROUND(yl, yr, 6); ROUND(yr, yl, 5); ROUND(yl, yr, 4); ROUND(yr, yl, 3); ROUND(yl, yr, 2); yl ^= P[1]; yr ^= P[0]; out_blk[0] = cpu_to_be32(yr); out_blk[1] = cpu_to_be32(yl); } static struct crypto_alg alg = { .cra_name = "blowfish", .cra_driver_name = "blowfish-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = BF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bf_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = BF_MIN_KEY_SIZE, .cia_max_keysize = BF_MAX_KEY_SIZE, .cia_setkey = blowfish_setkey, .cia_encrypt = bf_encrypt, .cia_decrypt = bf_decrypt } } }; static int __init blowfish_mod_init(void) { return crypto_register_alg(&alg); } static void __exit blowfish_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(blowfish_mod_init); module_exit(blowfish_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); MODULE_ALIAS("blowfish");
gpl-2.0
maz-1/android_kernel_sonyz_msm8974
drivers/media/dvb/frontends/stv6110.c
5689
10795
/* * stv6110.c * * Driver for ST STV6110 satellite tuner IC. * * Copyright (C) 2009 NetUP Inc. * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/dvb/frontend.h> #include <linux/types.h> #include "stv6110.h" static int debug; struct stv6110_priv { int i2c_address; struct i2c_adapter *i2c; u32 mclk; u8 clk_div; u8 gain; u8 regs[8]; }; #define dprintk(args...) 
\ do { \ if (debug) \ printk(KERN_DEBUG args); \ } while (0) static s32 abssub(s32 a, s32 b) { if (a > b) return a - b; else return b - a; }; static int stv6110_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[], int start, int len) { struct stv6110_priv *priv = fe->tuner_priv; int rc; u8 cmdbuf[len + 1]; struct i2c_msg msg = { .addr = priv->i2c_address, .flags = 0, .buf = cmdbuf, .len = len + 1 }; dprintk("%s\n", __func__); if (start + len > 8) return -EINVAL; memcpy(&cmdbuf[1], buf, len); cmdbuf[0] = start; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); rc = i2c_transfer(priv->i2c, &msg, 1); if (rc != 1) dprintk("%s: i2c error\n", __func__); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return 0; } static int stv6110_read_regs(struct dvb_frontend *fe, u8 regs[], int start, int len) { struct stv6110_priv *priv = fe->tuner_priv; int rc; u8 reg[] = { start }; struct i2c_msg msg[] = { { .addr = priv->i2c_address, .flags = 0, .buf = reg, .len = 1, }, { .addr = priv->i2c_address, .flags = I2C_M_RD, .buf = regs, .len = len, }, }; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); rc = i2c_transfer(priv->i2c, msg, 2); if (rc != 2) dprintk("%s: i2c error\n", __func__); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); memcpy(&priv->regs[start], regs, len); return 0; } static int stv6110_read_reg(struct dvb_frontend *fe, int start) { u8 buf[] = { 0 }; stv6110_read_regs(fe, buf, start, 1); return buf[0]; } static int stv6110_sleep(struct dvb_frontend *fe) { u8 reg[] = { 0 }; stv6110_write_regs(fe, reg, 0, 1); return 0; } static u32 carrier_width(u32 symbol_rate, fe_rolloff_t rolloff) { u32 rlf; switch (rolloff) { case ROLLOFF_20: rlf = 20; break; case ROLLOFF_25: rlf = 25; break; default: rlf = 35; break; } return symbol_rate + ((symbol_rate * rlf) / 100); } static int stv6110_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth) { 
struct stv6110_priv *priv = fe->tuner_priv; u8 r8, ret = 0x04; int i; if ((bandwidth / 2) > 36000000) /*BW/2 max=31+5=36 mhz for r8=31*/ r8 = 31; else if ((bandwidth / 2) < 5000000) /* BW/2 min=5Mhz for F=0 */ r8 = 0; else /*if 5 < BW/2 < 36*/ r8 = (bandwidth / 2) / 1000000 - 5; /* ctrl3, RCCLKOFF = 0 Activate the calibration Clock */ /* ctrl3, CF = r8 Set the LPF value */ priv->regs[RSTV6110_CTRL3] &= ~((1 << 6) | 0x1f); priv->regs[RSTV6110_CTRL3] |= (r8 & 0x1f); stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL3], RSTV6110_CTRL3, 1); /* stat1, CALRCSTRT = 1 Start LPF auto calibration*/ priv->regs[RSTV6110_STAT1] |= 0x02; stv6110_write_regs(fe, &priv->regs[RSTV6110_STAT1], RSTV6110_STAT1, 1); i = 0; /* Wait for CALRCSTRT == 0 */ while ((i < 10) && (ret != 0)) { ret = ((stv6110_read_reg(fe, RSTV6110_STAT1)) & 0x02); mdelay(1); /* wait for LPF auto calibration */ i++; } /* RCCLKOFF = 1 calibration done, desactivate the calibration Clock */ priv->regs[RSTV6110_CTRL3] |= (1 << 6); stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL3], RSTV6110_CTRL3, 1); return 0; } static int stv6110_init(struct dvb_frontend *fe) { struct stv6110_priv *priv = fe->tuner_priv; u8 buf0[] = { 0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e }; memcpy(priv->regs, buf0, 8); /* K = (Reference / 1000000) - 16 */ priv->regs[RSTV6110_CTRL1] &= ~(0x1f << 3); priv->regs[RSTV6110_CTRL1] |= ((((priv->mclk / 1000000) - 16) & 0x1f) << 3); /* divisor value for the output clock */ priv->regs[RSTV6110_CTRL2] &= ~0xc0; priv->regs[RSTV6110_CTRL2] |= (priv->clk_div << 6); stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL1], RSTV6110_CTRL1, 8); msleep(1); stv6110_set_bandwidth(fe, 72000000); return 0; } static int stv6110_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct stv6110_priv *priv = fe->tuner_priv; u32 nbsteps, divider, psd2, freq; u8 regs[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; stv6110_read_regs(fe, regs, 0, 8); /*N*/ divider = (priv->regs[RSTV6110_TUNING2] & 0x0f) << 8; divider += 
priv->regs[RSTV6110_TUNING1]; /*R*/ nbsteps = (priv->regs[RSTV6110_TUNING2] >> 6) & 3; /*p*/ psd2 = (priv->regs[RSTV6110_TUNING2] >> 4) & 1; freq = divider * (priv->mclk / 1000); freq /= (1 << (nbsteps + psd2)); freq /= 4; *frequency = freq; return 0; } static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency) { struct stv6110_priv *priv = fe->tuner_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u8 ret = 0x04; u32 divider, ref, p, presc, i, result_freq, vco_freq; s32 p_calc, p_calc_opt = 1000, r_div, r_div_opt = 0, p_val; s32 srate; dprintk("%s, freq=%d kHz, mclk=%d Hz\n", __func__, frequency, priv->mclk); /* K = (Reference / 1000000) - 16 */ priv->regs[RSTV6110_CTRL1] &= ~(0x1f << 3); priv->regs[RSTV6110_CTRL1] |= ((((priv->mclk / 1000000) - 16) & 0x1f) << 3); /* BB_GAIN = db/2 */ if (fe->ops.set_property && fe->ops.get_property) { srate = c->symbol_rate; dprintk("%s: Get Frontend parameters: srate=%d\n", __func__, srate); } else srate = 15000000; priv->regs[RSTV6110_CTRL2] &= ~0x0f; priv->regs[RSTV6110_CTRL2] |= (priv->gain & 0x0f); if (frequency <= 1023000) { p = 1; presc = 0; } else if (frequency <= 1300000) { p = 1; presc = 1; } else if (frequency <= 2046000) { p = 0; presc = 0; } else { p = 0; presc = 1; } /* DIV4SEL = p*/ priv->regs[RSTV6110_TUNING2] &= ~(1 << 4); priv->regs[RSTV6110_TUNING2] |= (p << 4); /* PRESC32ON = presc */ priv->regs[RSTV6110_TUNING2] &= ~(1 << 5); priv->regs[RSTV6110_TUNING2] |= (presc << 5); p_val = (int)(1 << (p + 1)) * 10;/* P = 2 or P = 4 */ for (r_div = 0; r_div <= 3; r_div++) { p_calc = (priv->mclk / 100000); p_calc /= (1 << (r_div + 1)); if ((abssub(p_calc, p_val)) < (abssub(p_calc_opt, p_val))) r_div_opt = r_div; p_calc_opt = (priv->mclk / 100000); p_calc_opt /= (1 << (r_div_opt + 1)); } ref = priv->mclk / ((1 << (r_div_opt + 1)) * (1 << (p + 1))); divider = (((frequency * 1000) + (ref >> 1)) / ref); /* RDIV = r_div_opt */ priv->regs[RSTV6110_TUNING2] &= ~(3 << 6); 
priv->regs[RSTV6110_TUNING2] |= (((r_div_opt) & 3) << 6); /* NDIV_MSB = MSB(divider) */ priv->regs[RSTV6110_TUNING2] &= ~0x0f; priv->regs[RSTV6110_TUNING2] |= (((divider) >> 8) & 0x0f); /* NDIV_LSB, LSB(divider) */ priv->regs[RSTV6110_TUNING1] = (divider & 0xff); /* CALVCOSTRT = 1 VCO Auto Calibration */ priv->regs[RSTV6110_STAT1] |= 0x04; stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL1], RSTV6110_CTRL1, 8); i = 0; /* Wait for CALVCOSTRT == 0 */ while ((i < 10) && (ret != 0)) { ret = ((stv6110_read_reg(fe, RSTV6110_STAT1)) & 0x04); msleep(1); /* wait for VCO auto calibration */ i++; } ret = stv6110_read_reg(fe, RSTV6110_STAT1); stv6110_get_frequency(fe, &result_freq); vco_freq = divider * ((priv->mclk / 1000) / ((1 << (r_div_opt + 1)))); dprintk("%s, stat1=%x, lo_freq=%d kHz, vco_frec=%d kHz\n", __func__, ret, result_freq, vco_freq); return 0; } static int stv6110_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 bandwidth = carrier_width(c->symbol_rate, c->rolloff); stv6110_set_frequency(fe, c->frequency); stv6110_set_bandwidth(fe, bandwidth); return 0; } static int stv6110_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct stv6110_priv *priv = fe->tuner_priv; u8 r8 = 0; u8 regs[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; stv6110_read_regs(fe, regs, 0, 8); /* CF */ r8 = priv->regs[RSTV6110_CTRL3] & 0x1f; *bandwidth = (r8 + 5) * 2000000;/* x2 for ZIF tuner BW/2 = F+5 Mhz */ return 0; } static struct dvb_tuner_ops stv6110_tuner_ops = { .info = { .name = "ST STV6110", .frequency_min = 950000, .frequency_max = 2150000, .frequency_step = 1000, }, .init = stv6110_init, .release = stv6110_release, .sleep = stv6110_sleep, .set_params = stv6110_set_params, .get_frequency = stv6110_get_frequency, .set_frequency = stv6110_set_frequency, .get_bandwidth = stv6110_get_bandwidth, .set_bandwidth = stv6110_set_bandwidth, }; struct dvb_frontend *stv6110_attach(struct dvb_frontend *fe, const struct stv6110_config *config, 
struct i2c_adapter *i2c) { struct stv6110_priv *priv = NULL; u8 reg0[] = { 0x00, 0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e }; struct i2c_msg msg[] = { { .addr = config->i2c_address, .flags = 0, .buf = reg0, .len = 9 } }; int ret; /* divisor value for the output clock */ reg0[2] &= ~0xc0; reg0[2] |= (config->clk_div << 6); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = i2c_transfer(i2c, msg, 1); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); if (ret != 1) return NULL; priv = kzalloc(sizeof(struct stv6110_priv), GFP_KERNEL); if (priv == NULL) return NULL; priv->i2c_address = config->i2c_address; priv->i2c = i2c; priv->mclk = config->mclk; priv->clk_div = config->clk_div; priv->gain = config->gain; memcpy(&priv->regs, &reg0[1], 8); memcpy(&fe->ops.tuner_ops, &stv6110_tuner_ops, sizeof(struct dvb_tuner_ops)); fe->tuner_priv = priv; printk(KERN_INFO "STV6110 attached on addr=%x!\n", priv->i2c_address); return fe; } EXPORT_SYMBOL(stv6110_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("ST STV6110 driver"); MODULE_AUTHOR("Igor M. Liplianin"); MODULE_LICENSE("GPL");
gpl-2.0
kiriapurv/kiriyard-mako-kernel
sound/usb/usx2y/usb_stream.c
7225
19509
/* * Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/usb.h> #include <linux/gfp.h> #include "usb_stream.h" /* setup */ static unsigned usb_stream_next_packet_size(struct usb_stream_kernel *sk) { struct usb_stream *s = sk->s; sk->out_phase_peeked = (sk->out_phase & 0xffff) + sk->freqn; return (sk->out_phase_peeked >> 16) * s->cfg.frame_size; } static void playback_prep_freqn(struct usb_stream_kernel *sk, struct urb *urb) { struct usb_stream *s = sk->s; int pack, lb = 0; for (pack = 0; pack < sk->n_o_ps; pack++) { int l = usb_stream_next_packet_size(sk); if (s->idle_outsize + lb + l > s->period_size) goto check; sk->out_phase = sk->out_phase_peeked; urb->iso_frame_desc[pack].offset = lb; urb->iso_frame_desc[pack].length = l; lb += l; } snd_printdd(KERN_DEBUG "%i\n", lb); check: urb->number_of_packets = pack; urb->transfer_buffer_length = lb; s->idle_outsize += lb - s->period_size; snd_printdd(KERN_DEBUG "idle=%i ul=%i ps=%i\n", s->idle_outsize, lb, s->period_size); } static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, struct urb **urbs, char *transfer, struct usb_device *dev, int pipe) { int u, p; int maxpacket = use_packsize ? 
use_packsize : usb_maxpacket(dev, pipe, usb_pipeout(pipe)); int transfer_length = maxpacket * sk->n_o_ps; for (u = 0; u < USB_STREAM_NURBS; ++u, transfer += transfer_length) { struct urb *urb = urbs[u]; struct usb_iso_packet_descriptor *desc; urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = transfer; urb->dev = dev; urb->pipe = pipe; urb->number_of_packets = sk->n_o_ps; urb->context = sk; urb->interval = 1; if (usb_pipeout(pipe)) continue; urb->transfer_buffer_length = transfer_length; desc = urb->iso_frame_desc; desc->offset = 0; desc->length = maxpacket; for (p = 1; p < sk->n_o_ps; ++p) { desc[p].offset = desc[p - 1].offset + maxpacket; desc[p].length = maxpacket; } } } static void init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, struct usb_device *dev, int in_pipe, int out_pipe) { struct usb_stream *s = sk->s; char *indata = (char *)s + sizeof(*s) + sizeof(struct usb_stream_packet) * s->inpackets; int u; for (u = 0; u < USB_STREAM_NURBS; ++u) { sk->inurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL); sk->outurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL); } init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe); init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev, out_pipe); } /* * convert a sampling rate into our full speed format (fs/1000 in Q16.16) * this will overflow at approx 524 kHz */ static inline unsigned get_usb_full_speed_rate(unsigned rate) { return ((rate << 13) + 62) / 125; } /* * convert a sampling rate into USB high speed format (fs/8000 in Q16.16) * this will overflow at approx 4 MHz */ static inline unsigned get_usb_high_speed_rate(unsigned rate) { return ((rate << 10) + 62) / 125; } void usb_stream_free(struct usb_stream_kernel *sk) { struct usb_stream *s; unsigned u; for (u = 0; u < USB_STREAM_NURBS; ++u) { usb_free_urb(sk->inurb[u]); sk->inurb[u] = NULL; usb_free_urb(sk->outurb[u]); sk->outurb[u] = NULL; } s = sk->s; if (!s) return; free_pages((unsigned long)sk->write_page, 
get_order(s->write_size)); sk->write_page = NULL; free_pages((unsigned long)s, get_order(s->read_size)); sk->s = NULL; } struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk, struct usb_device *dev, unsigned in_endpoint, unsigned out_endpoint, unsigned sample_rate, unsigned use_packsize, unsigned period_frames, unsigned frame_size) { int packets, max_packsize; int in_pipe, out_pipe; int read_size = sizeof(struct usb_stream); int write_size; int usb_frames = dev->speed == USB_SPEED_HIGH ? 8000 : 1000; int pg; in_pipe = usb_rcvisocpipe(dev, in_endpoint); out_pipe = usb_sndisocpipe(dev, out_endpoint); max_packsize = use_packsize ? use_packsize : usb_maxpacket(dev, in_pipe, 0); /* t_period = period_frames / sample_rate iso_packs = t_period / t_iso_frame = (period_frames / sample_rate) * (1 / t_iso_frame) */ packets = period_frames * usb_frames / sample_rate + 1; if (dev->speed == USB_SPEED_HIGH) packets = (packets + 7) & ~7; read_size += packets * USB_STREAM_URBDEPTH * (max_packsize + sizeof(struct usb_stream_packet)); max_packsize = usb_maxpacket(dev, out_pipe, 1); write_size = max_packsize * packets * USB_STREAM_URBDEPTH; if (read_size >= 256*PAGE_SIZE || write_size >= 256*PAGE_SIZE) { snd_printk(KERN_WARNING "a size exceeds 128*PAGE_SIZE\n"); goto out; } pg = get_order(read_size); sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); if (!sk->s) { snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); goto out; } sk->s->cfg.version = USB_STREAM_INTERFACE_VERSION; sk->s->read_size = read_size; sk->s->cfg.sample_rate = sample_rate; sk->s->cfg.frame_size = frame_size; sk->n_o_ps = packets; sk->s->inpackets = packets * USB_STREAM_URBDEPTH; sk->s->cfg.period_frames = period_frames; sk->s->period_size = frame_size * period_frames; sk->s->write_size = write_size; pg = get_order(write_size); sk->write_page = (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); if (!sk->write_page) { snd_printk(KERN_WARNING "couldn't 
__get_free_pages()\n"); usb_stream_free(sk); return NULL; } /* calculate the frequency in 16.16 format */ if (dev->speed == USB_SPEED_FULL) sk->freqn = get_usb_full_speed_rate(sample_rate); else sk->freqn = get_usb_high_speed_rate(sample_rate); init_urbs(sk, use_packsize, dev, in_pipe, out_pipe); sk->s->state = usb_stream_stopped; out: return sk->s; } /* start */ static bool balance_check(struct usb_stream_kernel *sk, struct urb *urb) { bool r; if (unlikely(urb->status)) { if (urb->status != -ESHUTDOWN && urb->status != -ENOENT) snd_printk(KERN_WARNING "status=%i\n", urb->status); sk->iso_frame_balance = 0x7FFFFFFF; return false; } r = sk->iso_frame_balance == 0; if (!r) sk->i_urb = urb; return r; } static bool balance_playback(struct usb_stream_kernel *sk, struct urb *urb) { sk->iso_frame_balance += urb->number_of_packets; return balance_check(sk, urb); } static bool balance_capture(struct usb_stream_kernel *sk, struct urb *urb) { sk->iso_frame_balance -= urb->number_of_packets; return balance_check(sk, urb); } static void subs_set_complete(struct urb **urbs, void (*complete)(struct urb *)) { int u; for (u = 0; u < USB_STREAM_NURBS; u++) { struct urb *urb = urbs[u]; urb->complete = complete; } } static int usb_stream_prepare_playback(struct usb_stream_kernel *sk, struct urb *inurb) { struct usb_stream *s = sk->s; struct urb *io; struct usb_iso_packet_descriptor *id, *od; int p = 0, lb = 0, l = 0; io = sk->idle_outurb; od = io->iso_frame_desc; for (; s->sync_packet < 0; ++p, ++s->sync_packet) { struct urb *ii = sk->completed_inurb; id = ii->iso_frame_desc + ii->number_of_packets + s->sync_packet; l = id->actual_length; od[p].length = l; od[p].offset = lb; lb += l; } for (; s->sync_packet < inurb->number_of_packets && p < sk->n_o_ps; ++p, ++s->sync_packet) { l = inurb->iso_frame_desc[s->sync_packet].actual_length; if (s->idle_outsize + lb + l > s->period_size) goto check_ok; od[p].length = l; od[p].offset = lb; lb += l; } check_ok: s->sync_packet -= 
inurb->number_of_packets; if (unlikely(s->sync_packet < -2 || s->sync_packet > 0)) { snd_printk(KERN_WARNING "invalid sync_packet = %i;" " p=%i nop=%i %i %x %x %x > %x\n", s->sync_packet, p, inurb->number_of_packets, s->idle_outsize + lb + l, s->idle_outsize, lb, l, s->period_size); return -1; } if (unlikely(lb % s->cfg.frame_size)) { snd_printk(KERN_WARNING"invalid outsize = %i\n", lb); return -1; } s->idle_outsize += lb - s->period_size; io->number_of_packets = p; io->transfer_buffer_length = lb; if (s->idle_outsize <= 0) return 0; snd_printk(KERN_WARNING "idle=%i\n", s->idle_outsize); return -1; } static void prepare_inurb(int number_of_packets, struct urb *iu) { struct usb_iso_packet_descriptor *id; int p; iu->number_of_packets = number_of_packets; id = iu->iso_frame_desc; id->offset = 0; for (p = 0; p < iu->number_of_packets - 1; ++p) id[p + 1].offset = id[p].offset + id[p].length; iu->transfer_buffer_length = id[0].length * iu->number_of_packets; } static int submit_urbs(struct usb_stream_kernel *sk, struct urb *inurb, struct urb *outurb) { int err; prepare_inurb(sk->idle_outurb->number_of_packets, sk->idle_inurb); err = usb_submit_urb(sk->idle_inurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR "%i\n", err); return err; } sk->idle_inurb = sk->completed_inurb; sk->completed_inurb = inurb; err = usb_submit_urb(sk->idle_outurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR "%i\n", err); return err; } sk->idle_outurb = sk->completed_outurb; sk->completed_outurb = outurb; return 0; } #ifdef DEBUG_LOOP_BACK /* This loop_back() shows how to read/write the period data. 
*/ static void loop_back(struct usb_stream *s) { char *i, *o; int il, ol, l, p; struct urb *iu; struct usb_iso_packet_descriptor *id; o = s->playback1st_to; ol = s->playback1st_size; l = 0; if (s->insplit_pack >= 0) { iu = sk->idle_inurb; id = iu->iso_frame_desc; p = s->insplit_pack; } else goto second; loop: for (; p < iu->number_of_packets && l < s->period_size; ++p) { i = iu->transfer_buffer + id[p].offset; il = id[p].actual_length; if (l + il > s->period_size) il = s->period_size - l; if (il <= ol) { memcpy(o, i, il); o += il; ol -= il; } else { memcpy(o, i, ol); singen_6pack(o, ol); o = s->playback_to; memcpy(o, i + ol, il - ol); o += il - ol; ol = s->period_size - s->playback1st_size; } l += il; } if (iu == sk->completed_inurb) { if (l != s->period_size) printk(KERN_DEBUG"%s:%i %i\n", __func__, __LINE__, l/(int)s->cfg.frame_size); return; } second: iu = sk->completed_inurb; id = iu->iso_frame_desc; p = 0; goto loop; } #else static void loop_back(struct usb_stream *s) { } #endif static void stream_idle(struct usb_stream_kernel *sk, struct urb *inurb, struct urb *outurb) { struct usb_stream *s = sk->s; int l, p; int insize = s->idle_insize; int urb_size = 0; s->inpacket_split = s->next_inpacket_split; s->inpacket_split_at = s->next_inpacket_split_at; s->next_inpacket_split = -1; s->next_inpacket_split_at = 0; for (p = 0; p < inurb->number_of_packets; ++p) { struct usb_iso_packet_descriptor *id = inurb->iso_frame_desc; l = id[p].actual_length; if (unlikely(l == 0 || id[p].status)) { snd_printk(KERN_WARNING "underrun, status=%u\n", id[p].status); goto err_out; } s->inpacket_head++; s->inpacket_head %= s->inpackets; if (s->inpacket_split == -1) s->inpacket_split = s->inpacket_head; s->inpacket[s->inpacket_head].offset = id[p].offset + (inurb->transfer_buffer - (void *)s); s->inpacket[s->inpacket_head].length = l; if (insize + l > s->period_size && s->next_inpacket_split == -1) { s->next_inpacket_split = s->inpacket_head; s->next_inpacket_split_at = s->period_size 
- insize; } insize += l; urb_size += l; } s->idle_insize += urb_size - s->period_size; if (s->idle_insize < 0) { snd_printk(KERN_WARNING "%i\n", (s->idle_insize)/(int)s->cfg.frame_size); goto err_out; } s->insize_done += urb_size; l = s->idle_outsize; s->outpacket[0].offset = (sk->idle_outurb->transfer_buffer - sk->write_page) - l; if (usb_stream_prepare_playback(sk, inurb) < 0) goto err_out; s->outpacket[0].length = sk->idle_outurb->transfer_buffer_length + l; s->outpacket[1].offset = sk->completed_outurb->transfer_buffer - sk->write_page; if (submit_urbs(sk, inurb, outurb) < 0) goto err_out; loop_back(s); s->periods_done++; wake_up_all(&sk->sleep); return; err_out: s->state = usb_stream_xrun; wake_up_all(&sk->sleep); } static void i_capture_idle(struct urb *urb) { struct usb_stream_kernel *sk = urb->context; if (balance_capture(sk, urb)) stream_idle(sk, urb, sk->i_urb); } static void i_playback_idle(struct urb *urb) { struct usb_stream_kernel *sk = urb->context; if (balance_playback(sk, urb)) stream_idle(sk, sk->i_urb, urb); } static void stream_start(struct usb_stream_kernel *sk, struct urb *inurb, struct urb *outurb) { struct usb_stream *s = sk->s; if (s->state >= usb_stream_sync1) { int l, p, max_diff, max_diff_0; int urb_size = 0; unsigned frames_per_packet, min_frames = 0; frames_per_packet = (s->period_size - s->idle_insize); frames_per_packet <<= 8; frames_per_packet /= s->cfg.frame_size * inurb->number_of_packets; frames_per_packet++; max_diff_0 = s->cfg.frame_size; if (s->cfg.period_frames >= 256) max_diff_0 <<= 1; if (s->cfg.period_frames >= 1024) max_diff_0 <<= 1; max_diff = max_diff_0; for (p = 0; p < inurb->number_of_packets; ++p) { int diff; l = inurb->iso_frame_desc[p].actual_length; urb_size += l; min_frames += frames_per_packet; diff = urb_size - (min_frames >> 8) * s->cfg.frame_size; if (diff < max_diff) { snd_printdd(KERN_DEBUG "%i %i %i %i\n", s->insize_done, urb_size / (int)s->cfg.frame_size, inurb->number_of_packets, diff); max_diff = diff; 
} } s->idle_insize -= max_diff - max_diff_0; s->idle_insize += urb_size - s->period_size; if (s->idle_insize < 0) { snd_printk(KERN_WARNING "%i %i %i\n", s->idle_insize, urb_size, s->period_size); return; } else if (s->idle_insize == 0) { s->next_inpacket_split = (s->inpacket_head + 1) % s->inpackets; s->next_inpacket_split_at = 0; } else { unsigned split = s->inpacket_head; l = s->idle_insize; while (l > s->inpacket[split].length) { l -= s->inpacket[split].length; if (split == 0) split = s->inpackets - 1; else split--; } s->next_inpacket_split = split; s->next_inpacket_split_at = s->inpacket[split].length - l; } s->insize_done += urb_size; if (usb_stream_prepare_playback(sk, inurb) < 0) return; } else playback_prep_freqn(sk, sk->idle_outurb); if (submit_urbs(sk, inurb, outurb) < 0) return; if (s->state == usb_stream_sync1 && s->insize_done > 360000) { /* just guesswork ^^^^^^ */ s->state = usb_stream_ready; subs_set_complete(sk->inurb, i_capture_idle); subs_set_complete(sk->outurb, i_playback_idle); } } static void i_capture_start(struct urb *urb) { struct usb_iso_packet_descriptor *id = urb->iso_frame_desc; struct usb_stream_kernel *sk = urb->context; struct usb_stream *s = sk->s; int p; int empty = 0; if (urb->status) { snd_printk(KERN_WARNING "status=%i\n", urb->status); return; } for (p = 0; p < urb->number_of_packets; ++p) { int l = id[p].actual_length; if (l < s->cfg.frame_size) { ++empty; if (s->state >= usb_stream_sync0) { snd_printk(KERN_WARNING "%i\n", l); return; } } s->inpacket_head++; s->inpacket_head %= s->inpackets; s->inpacket[s->inpacket_head].offset = id[p].offset + (urb->transfer_buffer - (void *)s); s->inpacket[s->inpacket_head].length = l; } #ifdef SHOW_EMPTY if (empty) { printk(KERN_DEBUG"%s:%i: %i", __func__, __LINE__, urb->iso_frame_desc[0].actual_length); for (pack = 1; pack < urb->number_of_packets; ++pack) { int l = urb->iso_frame_desc[pack].actual_length; printk(" %i", l); } printk("\n"); } #endif if (!empty && s->state < 
usb_stream_sync1) ++s->state; if (balance_capture(sk, urb)) stream_start(sk, urb, sk->i_urb); } static void i_playback_start(struct urb *urb) { struct usb_stream_kernel *sk = urb->context; if (balance_playback(sk, urb)) stream_start(sk, sk->i_urb, urb); } int usb_stream_start(struct usb_stream_kernel *sk) { struct usb_stream *s = sk->s; int frame = 0, iters = 0; int u, err; int try = 0; if (s->state != usb_stream_stopped) return -EAGAIN; subs_set_complete(sk->inurb, i_capture_start); subs_set_complete(sk->outurb, i_playback_start); memset(sk->write_page, 0, s->write_size); dotry: s->insize_done = 0; s->idle_insize = 0; s->idle_outsize = 0; s->sync_packet = -1; s->inpacket_head = -1; sk->iso_frame_balance = 0; ++try; for (u = 0; u < 2; u++) { struct urb *inurb = sk->inurb[u]; struct urb *outurb = sk->outurb[u]; playback_prep_freqn(sk, outurb); inurb->number_of_packets = outurb->number_of_packets; inurb->transfer_buffer_length = inurb->number_of_packets * inurb->iso_frame_desc[0].length; if (u == 0) { int now; struct usb_device *dev = inurb->dev; frame = usb_get_current_frame_number(dev); do { now = usb_get_current_frame_number(dev); ++iters; } while (now > -1 && now == frame); } err = usb_submit_urb(inurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])" " returned %i\n", u, err); return err; } err = usb_submit_urb(outurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])" " returned %i\n", u, err); return err; } if (inurb->start_frame != outurb->start_frame) { snd_printd(KERN_DEBUG "u[%i] start_frames differ in:%u out:%u\n", u, inurb->start_frame, outurb->start_frame); goto check_retry; } } snd_printdd(KERN_DEBUG "%i %i\n", frame, iters); try = 0; check_retry: if (try) { usb_stream_stop(sk); if (try < 5) { msleep(1500); snd_printd(KERN_DEBUG "goto dotry;\n"); goto dotry; } snd_printk(KERN_WARNING"couldn't start" " all urbs on the same start_frame.\n"); return -EFAULT; } sk->idle_inurb = 
sk->inurb[USB_STREAM_NURBS - 2]; sk->idle_outurb = sk->outurb[USB_STREAM_NURBS - 2]; sk->completed_inurb = sk->inurb[USB_STREAM_NURBS - 1]; sk->completed_outurb = sk->outurb[USB_STREAM_NURBS - 1]; /* wait, check */ { int wait_ms = 3000; while (s->state != usb_stream_ready && wait_ms > 0) { snd_printdd(KERN_DEBUG "%i\n", s->state); msleep(200); wait_ms -= 200; } } return s->state == usb_stream_ready ? 0 : -EFAULT; } /* stop */ void usb_stream_stop(struct usb_stream_kernel *sk) { int u; if (!sk->s) return; for (u = 0; u < USB_STREAM_NURBS; ++u) { usb_kill_urb(sk->inurb[u]); usb_kill_urb(sk->outurb[u]); } sk->s->state = usb_stream_stopped; msleep(400); }
gpl-2.0
TEAM-Gummy/Gummy_kernel_grouper
sound/usb/usx2y/usb_stream.c
7225
19509
/* * Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/usb.h> #include <linux/gfp.h> #include "usb_stream.h" /* setup */ static unsigned usb_stream_next_packet_size(struct usb_stream_kernel *sk) { struct usb_stream *s = sk->s; sk->out_phase_peeked = (sk->out_phase & 0xffff) + sk->freqn; return (sk->out_phase_peeked >> 16) * s->cfg.frame_size; } static void playback_prep_freqn(struct usb_stream_kernel *sk, struct urb *urb) { struct usb_stream *s = sk->s; int pack, lb = 0; for (pack = 0; pack < sk->n_o_ps; pack++) { int l = usb_stream_next_packet_size(sk); if (s->idle_outsize + lb + l > s->period_size) goto check; sk->out_phase = sk->out_phase_peeked; urb->iso_frame_desc[pack].offset = lb; urb->iso_frame_desc[pack].length = l; lb += l; } snd_printdd(KERN_DEBUG "%i\n", lb); check: urb->number_of_packets = pack; urb->transfer_buffer_length = lb; s->idle_outsize += lb - s->period_size; snd_printdd(KERN_DEBUG "idle=%i ul=%i ps=%i\n", s->idle_outsize, lb, s->period_size); } static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, struct urb **urbs, char *transfer, struct usb_device *dev, int pipe) { int u, p; int maxpacket = use_packsize ? 
use_packsize : usb_maxpacket(dev, pipe, usb_pipeout(pipe)); int transfer_length = maxpacket * sk->n_o_ps; for (u = 0; u < USB_STREAM_NURBS; ++u, transfer += transfer_length) { struct urb *urb = urbs[u]; struct usb_iso_packet_descriptor *desc; urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = transfer; urb->dev = dev; urb->pipe = pipe; urb->number_of_packets = sk->n_o_ps; urb->context = sk; urb->interval = 1; if (usb_pipeout(pipe)) continue; urb->transfer_buffer_length = transfer_length; desc = urb->iso_frame_desc; desc->offset = 0; desc->length = maxpacket; for (p = 1; p < sk->n_o_ps; ++p) { desc[p].offset = desc[p - 1].offset + maxpacket; desc[p].length = maxpacket; } } } static void init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize, struct usb_device *dev, int in_pipe, int out_pipe) { struct usb_stream *s = sk->s; char *indata = (char *)s + sizeof(*s) + sizeof(struct usb_stream_packet) * s->inpackets; int u; for (u = 0; u < USB_STREAM_NURBS; ++u) { sk->inurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL); sk->outurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL); } init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe); init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev, out_pipe); } /* * convert a sampling rate into our full speed format (fs/1000 in Q16.16) * this will overflow at approx 524 kHz */ static inline unsigned get_usb_full_speed_rate(unsigned rate) { return ((rate << 13) + 62) / 125; } /* * convert a sampling rate into USB high speed format (fs/8000 in Q16.16) * this will overflow at approx 4 MHz */ static inline unsigned get_usb_high_speed_rate(unsigned rate) { return ((rate << 10) + 62) / 125; } void usb_stream_free(struct usb_stream_kernel *sk) { struct usb_stream *s; unsigned u; for (u = 0; u < USB_STREAM_NURBS; ++u) { usb_free_urb(sk->inurb[u]); sk->inurb[u] = NULL; usb_free_urb(sk->outurb[u]); sk->outurb[u] = NULL; } s = sk->s; if (!s) return; free_pages((unsigned long)sk->write_page, 
get_order(s->write_size)); sk->write_page = NULL; free_pages((unsigned long)s, get_order(s->read_size)); sk->s = NULL; } struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk, struct usb_device *dev, unsigned in_endpoint, unsigned out_endpoint, unsigned sample_rate, unsigned use_packsize, unsigned period_frames, unsigned frame_size) { int packets, max_packsize; int in_pipe, out_pipe; int read_size = sizeof(struct usb_stream); int write_size; int usb_frames = dev->speed == USB_SPEED_HIGH ? 8000 : 1000; int pg; in_pipe = usb_rcvisocpipe(dev, in_endpoint); out_pipe = usb_sndisocpipe(dev, out_endpoint); max_packsize = use_packsize ? use_packsize : usb_maxpacket(dev, in_pipe, 0); /* t_period = period_frames / sample_rate iso_packs = t_period / t_iso_frame = (period_frames / sample_rate) * (1 / t_iso_frame) */ packets = period_frames * usb_frames / sample_rate + 1; if (dev->speed == USB_SPEED_HIGH) packets = (packets + 7) & ~7; read_size += packets * USB_STREAM_URBDEPTH * (max_packsize + sizeof(struct usb_stream_packet)); max_packsize = usb_maxpacket(dev, out_pipe, 1); write_size = max_packsize * packets * USB_STREAM_URBDEPTH; if (read_size >= 256*PAGE_SIZE || write_size >= 256*PAGE_SIZE) { snd_printk(KERN_WARNING "a size exceeds 128*PAGE_SIZE\n"); goto out; } pg = get_order(read_size); sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); if (!sk->s) { snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); goto out; } sk->s->cfg.version = USB_STREAM_INTERFACE_VERSION; sk->s->read_size = read_size; sk->s->cfg.sample_rate = sample_rate; sk->s->cfg.frame_size = frame_size; sk->n_o_ps = packets; sk->s->inpackets = packets * USB_STREAM_URBDEPTH; sk->s->cfg.period_frames = period_frames; sk->s->period_size = frame_size * period_frames; sk->s->write_size = write_size; pg = get_order(write_size); sk->write_page = (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); if (!sk->write_page) { snd_printk(KERN_WARNING "couldn't 
__get_free_pages()\n"); usb_stream_free(sk); return NULL; } /* calculate the frequency in 16.16 format */ if (dev->speed == USB_SPEED_FULL) sk->freqn = get_usb_full_speed_rate(sample_rate); else sk->freqn = get_usb_high_speed_rate(sample_rate); init_urbs(sk, use_packsize, dev, in_pipe, out_pipe); sk->s->state = usb_stream_stopped; out: return sk->s; } /* start */ static bool balance_check(struct usb_stream_kernel *sk, struct urb *urb) { bool r; if (unlikely(urb->status)) { if (urb->status != -ESHUTDOWN && urb->status != -ENOENT) snd_printk(KERN_WARNING "status=%i\n", urb->status); sk->iso_frame_balance = 0x7FFFFFFF; return false; } r = sk->iso_frame_balance == 0; if (!r) sk->i_urb = urb; return r; } static bool balance_playback(struct usb_stream_kernel *sk, struct urb *urb) { sk->iso_frame_balance += urb->number_of_packets; return balance_check(sk, urb); } static bool balance_capture(struct usb_stream_kernel *sk, struct urb *urb) { sk->iso_frame_balance -= urb->number_of_packets; return balance_check(sk, urb); } static void subs_set_complete(struct urb **urbs, void (*complete)(struct urb *)) { int u; for (u = 0; u < USB_STREAM_NURBS; u++) { struct urb *urb = urbs[u]; urb->complete = complete; } } static int usb_stream_prepare_playback(struct usb_stream_kernel *sk, struct urb *inurb) { struct usb_stream *s = sk->s; struct urb *io; struct usb_iso_packet_descriptor *id, *od; int p = 0, lb = 0, l = 0; io = sk->idle_outurb; od = io->iso_frame_desc; for (; s->sync_packet < 0; ++p, ++s->sync_packet) { struct urb *ii = sk->completed_inurb; id = ii->iso_frame_desc + ii->number_of_packets + s->sync_packet; l = id->actual_length; od[p].length = l; od[p].offset = lb; lb += l; } for (; s->sync_packet < inurb->number_of_packets && p < sk->n_o_ps; ++p, ++s->sync_packet) { l = inurb->iso_frame_desc[s->sync_packet].actual_length; if (s->idle_outsize + lb + l > s->period_size) goto check_ok; od[p].length = l; od[p].offset = lb; lb += l; } check_ok: s->sync_packet -= 
inurb->number_of_packets; if (unlikely(s->sync_packet < -2 || s->sync_packet > 0)) { snd_printk(KERN_WARNING "invalid sync_packet = %i;" " p=%i nop=%i %i %x %x %x > %x\n", s->sync_packet, p, inurb->number_of_packets, s->idle_outsize + lb + l, s->idle_outsize, lb, l, s->period_size); return -1; } if (unlikely(lb % s->cfg.frame_size)) { snd_printk(KERN_WARNING"invalid outsize = %i\n", lb); return -1; } s->idle_outsize += lb - s->period_size; io->number_of_packets = p; io->transfer_buffer_length = lb; if (s->idle_outsize <= 0) return 0; snd_printk(KERN_WARNING "idle=%i\n", s->idle_outsize); return -1; } static void prepare_inurb(int number_of_packets, struct urb *iu) { struct usb_iso_packet_descriptor *id; int p; iu->number_of_packets = number_of_packets; id = iu->iso_frame_desc; id->offset = 0; for (p = 0; p < iu->number_of_packets - 1; ++p) id[p + 1].offset = id[p].offset + id[p].length; iu->transfer_buffer_length = id[0].length * iu->number_of_packets; } static int submit_urbs(struct usb_stream_kernel *sk, struct urb *inurb, struct urb *outurb) { int err; prepare_inurb(sk->idle_outurb->number_of_packets, sk->idle_inurb); err = usb_submit_urb(sk->idle_inurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR "%i\n", err); return err; } sk->idle_inurb = sk->completed_inurb; sk->completed_inurb = inurb; err = usb_submit_urb(sk->idle_outurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR "%i\n", err); return err; } sk->idle_outurb = sk->completed_outurb; sk->completed_outurb = outurb; return 0; } #ifdef DEBUG_LOOP_BACK /* This loop_back() shows how to read/write the period data. 
*/ static void loop_back(struct usb_stream *s) { char *i, *o; int il, ol, l, p; struct urb *iu; struct usb_iso_packet_descriptor *id; o = s->playback1st_to; ol = s->playback1st_size; l = 0; if (s->insplit_pack >= 0) { iu = sk->idle_inurb; id = iu->iso_frame_desc; p = s->insplit_pack; } else goto second; loop: for (; p < iu->number_of_packets && l < s->period_size; ++p) { i = iu->transfer_buffer + id[p].offset; il = id[p].actual_length; if (l + il > s->period_size) il = s->period_size - l; if (il <= ol) { memcpy(o, i, il); o += il; ol -= il; } else { memcpy(o, i, ol); singen_6pack(o, ol); o = s->playback_to; memcpy(o, i + ol, il - ol); o += il - ol; ol = s->period_size - s->playback1st_size; } l += il; } if (iu == sk->completed_inurb) { if (l != s->period_size) printk(KERN_DEBUG"%s:%i %i\n", __func__, __LINE__, l/(int)s->cfg.frame_size); return; } second: iu = sk->completed_inurb; id = iu->iso_frame_desc; p = 0; goto loop; } #else static void loop_back(struct usb_stream *s) { } #endif static void stream_idle(struct usb_stream_kernel *sk, struct urb *inurb, struct urb *outurb) { struct usb_stream *s = sk->s; int l, p; int insize = s->idle_insize; int urb_size = 0; s->inpacket_split = s->next_inpacket_split; s->inpacket_split_at = s->next_inpacket_split_at; s->next_inpacket_split = -1; s->next_inpacket_split_at = 0; for (p = 0; p < inurb->number_of_packets; ++p) { struct usb_iso_packet_descriptor *id = inurb->iso_frame_desc; l = id[p].actual_length; if (unlikely(l == 0 || id[p].status)) { snd_printk(KERN_WARNING "underrun, status=%u\n", id[p].status); goto err_out; } s->inpacket_head++; s->inpacket_head %= s->inpackets; if (s->inpacket_split == -1) s->inpacket_split = s->inpacket_head; s->inpacket[s->inpacket_head].offset = id[p].offset + (inurb->transfer_buffer - (void *)s); s->inpacket[s->inpacket_head].length = l; if (insize + l > s->period_size && s->next_inpacket_split == -1) { s->next_inpacket_split = s->inpacket_head; s->next_inpacket_split_at = s->period_size 
- insize; } insize += l; urb_size += l; } s->idle_insize += urb_size - s->period_size; if (s->idle_insize < 0) { snd_printk(KERN_WARNING "%i\n", (s->idle_insize)/(int)s->cfg.frame_size); goto err_out; } s->insize_done += urb_size; l = s->idle_outsize; s->outpacket[0].offset = (sk->idle_outurb->transfer_buffer - sk->write_page) - l; if (usb_stream_prepare_playback(sk, inurb) < 0) goto err_out; s->outpacket[0].length = sk->idle_outurb->transfer_buffer_length + l; s->outpacket[1].offset = sk->completed_outurb->transfer_buffer - sk->write_page; if (submit_urbs(sk, inurb, outurb) < 0) goto err_out; loop_back(s); s->periods_done++; wake_up_all(&sk->sleep); return; err_out: s->state = usb_stream_xrun; wake_up_all(&sk->sleep); } static void i_capture_idle(struct urb *urb) { struct usb_stream_kernel *sk = urb->context; if (balance_capture(sk, urb)) stream_idle(sk, urb, sk->i_urb); } static void i_playback_idle(struct urb *urb) { struct usb_stream_kernel *sk = urb->context; if (balance_playback(sk, urb)) stream_idle(sk, sk->i_urb, urb); } static void stream_start(struct usb_stream_kernel *sk, struct urb *inurb, struct urb *outurb) { struct usb_stream *s = sk->s; if (s->state >= usb_stream_sync1) { int l, p, max_diff, max_diff_0; int urb_size = 0; unsigned frames_per_packet, min_frames = 0; frames_per_packet = (s->period_size - s->idle_insize); frames_per_packet <<= 8; frames_per_packet /= s->cfg.frame_size * inurb->number_of_packets; frames_per_packet++; max_diff_0 = s->cfg.frame_size; if (s->cfg.period_frames >= 256) max_diff_0 <<= 1; if (s->cfg.period_frames >= 1024) max_diff_0 <<= 1; max_diff = max_diff_0; for (p = 0; p < inurb->number_of_packets; ++p) { int diff; l = inurb->iso_frame_desc[p].actual_length; urb_size += l; min_frames += frames_per_packet; diff = urb_size - (min_frames >> 8) * s->cfg.frame_size; if (diff < max_diff) { snd_printdd(KERN_DEBUG "%i %i %i %i\n", s->insize_done, urb_size / (int)s->cfg.frame_size, inurb->number_of_packets, diff); max_diff = diff; 
} } s->idle_insize -= max_diff - max_diff_0; s->idle_insize += urb_size - s->period_size; if (s->idle_insize < 0) { snd_printk(KERN_WARNING "%i %i %i\n", s->idle_insize, urb_size, s->period_size); return; } else if (s->idle_insize == 0) { s->next_inpacket_split = (s->inpacket_head + 1) % s->inpackets; s->next_inpacket_split_at = 0; } else { unsigned split = s->inpacket_head; l = s->idle_insize; while (l > s->inpacket[split].length) { l -= s->inpacket[split].length; if (split == 0) split = s->inpackets - 1; else split--; } s->next_inpacket_split = split; s->next_inpacket_split_at = s->inpacket[split].length - l; } s->insize_done += urb_size; if (usb_stream_prepare_playback(sk, inurb) < 0) return; } else playback_prep_freqn(sk, sk->idle_outurb); if (submit_urbs(sk, inurb, outurb) < 0) return; if (s->state == usb_stream_sync1 && s->insize_done > 360000) { /* just guesswork ^^^^^^ */ s->state = usb_stream_ready; subs_set_complete(sk->inurb, i_capture_idle); subs_set_complete(sk->outurb, i_playback_idle); } } static void i_capture_start(struct urb *urb) { struct usb_iso_packet_descriptor *id = urb->iso_frame_desc; struct usb_stream_kernel *sk = urb->context; struct usb_stream *s = sk->s; int p; int empty = 0; if (urb->status) { snd_printk(KERN_WARNING "status=%i\n", urb->status); return; } for (p = 0; p < urb->number_of_packets; ++p) { int l = id[p].actual_length; if (l < s->cfg.frame_size) { ++empty; if (s->state >= usb_stream_sync0) { snd_printk(KERN_WARNING "%i\n", l); return; } } s->inpacket_head++; s->inpacket_head %= s->inpackets; s->inpacket[s->inpacket_head].offset = id[p].offset + (urb->transfer_buffer - (void *)s); s->inpacket[s->inpacket_head].length = l; } #ifdef SHOW_EMPTY if (empty) { printk(KERN_DEBUG"%s:%i: %i", __func__, __LINE__, urb->iso_frame_desc[0].actual_length); for (pack = 1; pack < urb->number_of_packets; ++pack) { int l = urb->iso_frame_desc[pack].actual_length; printk(" %i", l); } printk("\n"); } #endif if (!empty && s->state < 
usb_stream_sync1) ++s->state; if (balance_capture(sk, urb)) stream_start(sk, urb, sk->i_urb); } static void i_playback_start(struct urb *urb) { struct usb_stream_kernel *sk = urb->context; if (balance_playback(sk, urb)) stream_start(sk, sk->i_urb, urb); } int usb_stream_start(struct usb_stream_kernel *sk) { struct usb_stream *s = sk->s; int frame = 0, iters = 0; int u, err; int try = 0; if (s->state != usb_stream_stopped) return -EAGAIN; subs_set_complete(sk->inurb, i_capture_start); subs_set_complete(sk->outurb, i_playback_start); memset(sk->write_page, 0, s->write_size); dotry: s->insize_done = 0; s->idle_insize = 0; s->idle_outsize = 0; s->sync_packet = -1; s->inpacket_head = -1; sk->iso_frame_balance = 0; ++try; for (u = 0; u < 2; u++) { struct urb *inurb = sk->inurb[u]; struct urb *outurb = sk->outurb[u]; playback_prep_freqn(sk, outurb); inurb->number_of_packets = outurb->number_of_packets; inurb->transfer_buffer_length = inurb->number_of_packets * inurb->iso_frame_desc[0].length; if (u == 0) { int now; struct usb_device *dev = inurb->dev; frame = usb_get_current_frame_number(dev); do { now = usb_get_current_frame_number(dev); ++iters; } while (now > -1 && now == frame); } err = usb_submit_urb(inurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])" " returned %i\n", u, err); return err; } err = usb_submit_urb(outurb, GFP_ATOMIC); if (err < 0) { snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])" " returned %i\n", u, err); return err; } if (inurb->start_frame != outurb->start_frame) { snd_printd(KERN_DEBUG "u[%i] start_frames differ in:%u out:%u\n", u, inurb->start_frame, outurb->start_frame); goto check_retry; } } snd_printdd(KERN_DEBUG "%i %i\n", frame, iters); try = 0; check_retry: if (try) { usb_stream_stop(sk); if (try < 5) { msleep(1500); snd_printd(KERN_DEBUG "goto dotry;\n"); goto dotry; } snd_printk(KERN_WARNING"couldn't start" " all urbs on the same start_frame.\n"); return -EFAULT; } sk->idle_inurb = 
sk->inurb[USB_STREAM_NURBS - 2]; sk->idle_outurb = sk->outurb[USB_STREAM_NURBS - 2]; sk->completed_inurb = sk->inurb[USB_STREAM_NURBS - 1]; sk->completed_outurb = sk->outurb[USB_STREAM_NURBS - 1]; /* wait, check */ { int wait_ms = 3000; while (s->state != usb_stream_ready && wait_ms > 0) { snd_printdd(KERN_DEBUG "%i\n", s->state); msleep(200); wait_ms -= 200; } } return s->state == usb_stream_ready ? 0 : -EFAULT; } /* stop */ void usb_stream_stop(struct usb_stream_kernel *sk) { int u; if (!sk->s) return; for (u = 0; u < USB_STREAM_NURBS; ++u) { usb_kill_urb(sk->inurb[u]); usb_kill_urb(sk->outurb[u]); } sk->s->state = usb_stream_stopped; msleep(400); }
gpl-2.0
HRTKernel/samsung_exynos_7420
arch/sparc/kernel/unaligned_32.c
7225
9389
/* * unaligned.c: Unaligned load/store trap handling with special * cases for the kernel to do them more quickly. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/perf_event.h> enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ both, /* Swap, ldstub, etc. */ fpload, fpstore, invalid, }; static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; if(!tmp) return load; else { if(((insn>>19)&0x3f) == 15) return both; else return store; } } /* 8 = double-word, 4 = word, 2 = half-word */ static inline int decode_access_size(unsigned int insn) { insn = (insn >> 19) & 3; if(!insn) return 4; else if(insn == 3) return 8; else if(insn == 2) return 2; else { printk("Impossible unaligned trap. insn=%08x\n", insn); die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs); return 4; /* just to keep gcc happy. */ } } /* 0x400000 = signed, 0 = unsigned */ static inline int decode_signedness(unsigned int insn) { return (insn & 0x400000); } static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd) { if(rs2 >= 16 || rs1 >= 16 || rd >= 16) { /* Wheee... */ __asm__ __volatile__("save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "restore; restore; restore; restore;\n\t" "restore; restore; restore;\n\t"); } } static inline int sign_extend_imm13(int imm) { return imm << 19 >> 19; } static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { struct reg_window32 *win; if(reg < 16) return (!reg ? 
0 : regs->u_regs[reg]); /* Ho hum, the slightly complicated case. */ win = (struct reg_window32 *) regs->u_regs[UREG_FP]; return win->locals[reg - 16]; /* yes, I know what this does... */ } static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs) { struct reg_window32 __user *win; unsigned long ret; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); /* Ho hum, the slightly complicated case. */ win = (struct reg_window32 __user *) regs->u_regs[UREG_FP]; if ((unsigned long)win & 3) return -1; if (get_user(ret, &win->locals[reg - 16])) return -1; return ret; } static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) { struct reg_window32 *win; if(reg < 16) return &regs->u_regs[reg]; win = (struct reg_window32 *) regs->u_regs[UREG_FP]; return &win->locals[reg - 16]; } static unsigned long compute_effective_address(struct pt_regs *regs, unsigned int insn) { unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; unsigned int rd = (insn >> 25) & 0x1f; if(insn & 0x2000) { maybe_flush_windows(rs1, 0, rd); return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd); return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); } } unsigned long safe_compute_effective_address(struct pt_regs *regs, unsigned int insn) { unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; unsigned int rd = (insn >> 25) & 0x1f; if(insn & 0x2000) { maybe_flush_windows(rs1, 0, rd); return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd); return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs)); } } /* This is just to make gcc think panic does return... 
*/ static void unaligned_panic(char *str) { panic(str); } /* una_asm.S */ extern int do_int_load(unsigned long *dest_reg, int size, unsigned long *saddr, int is_signed); extern int __do_int_store(unsigned long *dst_addr, int size, unsigned long *src_val); static int do_int_store(int reg_num, int size, unsigned long *dst_addr, struct pt_regs *regs) { unsigned long zero[2] = { 0, 0 }; unsigned long *src_val; if (reg_num) src_val = fetch_reg_addr(reg_num, regs); else { src_val = &zero[0]; if (size == 8) zero[1] = fetch_reg(1, regs); } return __do_int_store(dst_addr, size, src_val); } extern void smp_capture(void); extern void smp_release(void); static inline void advance(struct pt_regs *regs) { regs->pc = regs->npc; regs->npc += 4; } static inline int floating_point_load_or_store_p(unsigned int insn) { return (insn >> 24) & 1; } static inline int ok_for_kernel(unsigned int insn) { return !floating_point_load_or_store_p(insn); } static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) { unsigned long g2 = regs->u_regs [UREG_G2]; unsigned long fixup = search_extables_range(regs->pc, &g2); if (!fixup) { unsigned long address = compute_effective_address(regs, insn); if(address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler"); } else printk(KERN_ALERT "Unable to handle kernel paging request in mna handler"); printk(KERN_ALERT " at virtual address %08lx\n",address); printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n", (current->mm ? current->mm->context : current->active_mm->context)); printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n", (current->mm ? 
(unsigned long) current->mm->pgd : (unsigned long) current->active_mm->pgd)); die_if_kernel("Oops", regs); /* Not reached */ } regs->pc = fixup; regs->npc = regs->pc + 4; regs->u_regs [UREG_G2] = g2; } asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); int size = decode_access_size(insn); if(!ok_for_kernel(insn) || dir == both) { printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n", regs->pc); unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store."); } else { unsigned long addr = compute_effective_address(regs, insn); int err; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), size, (unsigned long *) addr, decode_signedness(insn)); break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs); break; default: panic("Impossible kernel unaligned trap."); /* Not reached... */ } if (err) kernel_mna_trap_fault(regs, insn); else advance(regs); } } static inline int ok_for_user(struct pt_regs *regs, unsigned int insn, enum direction dir) { unsigned int reg; int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE; int size = ((insn >> 19) & 3) == 3 ? 8 : 4; if ((regs->pc | regs->npc) & 3) return 0; /* Must access_ok() in all the necessary places. 
*/ #define WINREG_ADDR(regnum) \ ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum))) reg = (insn >> 25) & 0x1f; if (reg >= 16) { if (!access_ok(check, WINREG_ADDR(reg - 16), size)) return -EFAULT; } reg = (insn >> 14) & 0x1f; if (reg >= 16) { if (!access_ok(check, WINREG_ADDR(reg - 16), size)) return -EFAULT; } if (!(insn & 0x2000)) { reg = (insn & 0x1f); if (reg >= 16) { if (!access_ok(check, WINREG_ADDR(reg - 16), size)) return -EFAULT; } } #undef WINREG_ADDR return 0; } static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) { siginfo_t info; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void __user *)safe_compute_effective_address(regs, insn); info.si_trapno = 0; send_sig_info(SIGBUS, &info, current); } asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir; if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || (((insn >> 30) & 3) != 3)) goto kill_user; dir = decode_direction(insn); if(!ok_for_user(regs, insn, dir)) { goto kill_user; } else { int err, size = decode_access_size(insn); unsigned long addr; if(floating_point_load_or_store_p(insn)) { printk("User FPU load/store unaligned unsupported.\n"); goto kill_user; } addr = compute_effective_address(regs, insn); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), size, (unsigned long *) addr, decode_signedness(insn)); break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs); break; case both: /* * This was supported in 2.4. However, we question * the value of SWAP instruction across word boundaries. */ printk("Unaligned SWAP unsupported.\n"); err = -EFAULT; break; default: unaligned_panic("Impossible user unaligned trap."); goto out; } if (err) goto kill_user; else advance(regs); goto out; } kill_user: user_mna_trap_fault(regs, insn); out: ; }
gpl-2.0
stedman420/android_kernel_zte_hera
drivers/input/joystick/a3d.c
7993
11356
/* * Copyright (c) 1998-2001 Vojtech Pavlik */ /* * FP-Gaming Assassin 3D joystick driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/gameport.h> #include <linux/input.h> #include <linux/jiffies.h> #define DRIVER_DESC "FP-Gaming Assassin 3D joystick driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define A3D_MAX_START 600 /* 600 us */ #define A3D_MAX_STROBE 80 /* 80 us */ #define A3D_MAX_LENGTH 40 /* 40*3 bits */ #define A3D_MODE_A3D 1 /* Assassin 3D */ #define A3D_MODE_PAN 2 /* Panther */ #define A3D_MODE_OEM 3 /* Panther OEM version */ #define A3D_MODE_PXL 4 /* Panther XL */ static char *a3d_names[] = { NULL, "FP-Gaming Assassin 3D", "MadCatz Panther", "OEM Panther", "MadCatz Panther XL", "MadCatz Panther XL w/ rudder" }; struct a3d { struct gameport *gameport; struct gameport *adc; struct input_dev *dev; int axes[4]; int buttons; int mode; int length; int reads; int bads; char phys[32]; }; /* * a3d_read_packet() reads an 
Assassin 3D packet. */ static int a3d_read_packet(struct gameport *gameport, int length, char *data) { unsigned long flags; unsigned char u, v; unsigned int t, s; int i; i = 0; t = gameport_time(gameport, A3D_MAX_START); s = gameport_time(gameport, A3D_MAX_STROBE); local_irq_save(flags); gameport_trigger(gameport); v = gameport_read(gameport); while (t > 0 && i < length) { t--; u = v; v = gameport_read(gameport); if (~v & u & 0x10) { data[i++] = v >> 5; t = s; } } local_irq_restore(flags); return i; } /* * a3d_csum() computes checksum of triplet packet */ static int a3d_csum(char *data, int count) { int i, csum = 0; for (i = 0; i < count - 2; i++) csum += data[i]; return (csum & 0x3f) != ((data[count - 2] << 3) | data[count - 1]); } static void a3d_read(struct a3d *a3d, unsigned char *data) { struct input_dev *dev = a3d->dev; switch (a3d->mode) { case A3D_MODE_A3D: case A3D_MODE_OEM: case A3D_MODE_PAN: input_report_rel(dev, REL_X, ((data[5] << 6) | (data[6] << 3) | data[ 7]) - ((data[5] & 4) << 7)); input_report_rel(dev, REL_Y, ((data[8] << 6) | (data[9] << 3) | data[10]) - ((data[8] & 4) << 7)); input_report_key(dev, BTN_RIGHT, data[2] & 1); input_report_key(dev, BTN_LEFT, data[3] & 2); input_report_key(dev, BTN_MIDDLE, data[3] & 4); input_sync(dev); a3d->axes[0] = ((signed char)((data[11] << 6) | (data[12] << 3) | (data[13]))) + 128; a3d->axes[1] = ((signed char)((data[14] << 6) | (data[15] << 3) | (data[16]))) + 128; a3d->axes[2] = ((signed char)((data[17] << 6) | (data[18] << 3) | (data[19]))) + 128; a3d->axes[3] = ((signed char)((data[20] << 6) | (data[21] << 3) | (data[22]))) + 128; a3d->buttons = ((data[3] << 3) | data[4]) & 0xf; break; case A3D_MODE_PXL: input_report_rel(dev, REL_X, ((data[ 9] << 6) | (data[10] << 3) | data[11]) - ((data[ 9] & 4) << 7)); input_report_rel(dev, REL_Y, ((data[12] << 6) | (data[13] << 3) | data[14]) - ((data[12] & 4) << 7)); input_report_key(dev, BTN_RIGHT, data[2] & 1); input_report_key(dev, BTN_LEFT, data[3] & 2); 
input_report_key(dev, BTN_MIDDLE, data[3] & 4); input_report_key(dev, BTN_SIDE, data[7] & 2); input_report_key(dev, BTN_EXTRA, data[7] & 4); input_report_abs(dev, ABS_X, ((signed char)((data[15] << 6) | (data[16] << 3) | (data[17]))) + 128); input_report_abs(dev, ABS_Y, ((signed char)((data[18] << 6) | (data[19] << 3) | (data[20]))) + 128); input_report_abs(dev, ABS_RUDDER, ((signed char)((data[21] << 6) | (data[22] << 3) | (data[23]))) + 128); input_report_abs(dev, ABS_THROTTLE, ((signed char)((data[24] << 6) | (data[25] << 3) | (data[26]))) + 128); input_report_abs(dev, ABS_HAT0X, ( data[5] & 1) - ((data[5] >> 2) & 1)); input_report_abs(dev, ABS_HAT0Y, ((data[5] >> 1) & 1) - ((data[6] >> 2) & 1)); input_report_abs(dev, ABS_HAT1X, ((data[4] >> 1) & 1) - ( data[3] & 1)); input_report_abs(dev, ABS_HAT1Y, ((data[4] >> 2) & 1) - ( data[4] & 1)); input_report_key(dev, BTN_TRIGGER, data[8] & 1); input_report_key(dev, BTN_THUMB, data[8] & 2); input_report_key(dev, BTN_TOP, data[8] & 4); input_report_key(dev, BTN_PINKIE, data[7] & 1); input_sync(dev); break; } } /* * a3d_poll() reads and analyzes A3D joystick data. */ static void a3d_poll(struct gameport *gameport) { struct a3d *a3d = gameport_get_drvdata(gameport); unsigned char data[A3D_MAX_LENGTH]; a3d->reads++; if (a3d_read_packet(a3d->gameport, a3d->length, data) != a3d->length || data[0] != a3d->mode || a3d_csum(data, a3d->length)) a3d->bads++; else a3d_read(a3d, data); } /* * a3d_adc_cooked_read() copies the acis and button data to the * callers arrays. It could do the read itself, but the caller could * call this more than 50 times a second, which would use too much CPU. */ static int a3d_adc_cooked_read(struct gameport *gameport, int *axes, int *buttons) { struct a3d *a3d = gameport->port_data; int i; for (i = 0; i < 4; i++) axes[i] = (a3d->axes[i] < 254) ? a3d->axes[i] : -1; *buttons = a3d->buttons; return 0; } /* * a3d_adc_open() is the gameport open routine. It refuses to serve * any but cooked data. 
*/ static int a3d_adc_open(struct gameport *gameport, int mode) { struct a3d *a3d = gameport->port_data; if (mode != GAMEPORT_MODE_COOKED) return -1; gameport_start_polling(a3d->gameport); return 0; } /* * a3d_adc_close() is a callback from the input close routine. */ static void a3d_adc_close(struct gameport *gameport) { struct a3d *a3d = gameport->port_data; gameport_stop_polling(a3d->gameport); } /* * a3d_open() is a callback from the input open routine. */ static int a3d_open(struct input_dev *dev) { struct a3d *a3d = input_get_drvdata(dev); gameport_start_polling(a3d->gameport); return 0; } /* * a3d_close() is a callback from the input close routine. */ static void a3d_close(struct input_dev *dev) { struct a3d *a3d = input_get_drvdata(dev); gameport_stop_polling(a3d->gameport); } /* * a3d_connect() probes for A3D joysticks. */ static int a3d_connect(struct gameport *gameport, struct gameport_driver *drv) { struct a3d *a3d; struct input_dev *input_dev; struct gameport *adc; unsigned char data[A3D_MAX_LENGTH]; int i; int err; a3d = kzalloc(sizeof(struct a3d), GFP_KERNEL); input_dev = input_allocate_device(); if (!a3d || !input_dev) { err = -ENOMEM; goto fail1; } a3d->dev = input_dev; a3d->gameport = gameport; gameport_set_drvdata(gameport, a3d); err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW); if (err) goto fail1; i = a3d_read_packet(gameport, A3D_MAX_LENGTH, data); if (!i || a3d_csum(data, i)) { err = -ENODEV; goto fail2; } a3d->mode = data[0]; if (!a3d->mode || a3d->mode > 5) { printk(KERN_WARNING "a3d.c: Unknown A3D device detected " "(%s, id=%d), contact <vojtech@ucw.cz>\n", gameport->phys, a3d->mode); err = -ENODEV; goto fail2; } gameport_set_poll_handler(gameport, a3d_poll); gameport_set_poll_interval(gameport, 20); snprintf(a3d->phys, sizeof(a3d->phys), "%s/input0", gameport->phys); input_dev->name = a3d_names[a3d->mode]; input_dev->phys = a3d->phys; input_dev->id.bustype = BUS_GAMEPORT; input_dev->id.vendor = GAMEPORT_ID_VENDOR_MADCATZ; 
input_dev->id.product = a3d->mode; input_dev->id.version = 0x0100; input_dev->dev.parent = &gameport->dev; input_dev->open = a3d_open; input_dev->close = a3d_close; input_set_drvdata(input_dev, a3d); if (a3d->mode == A3D_MODE_PXL) { int axes[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER }; a3d->length = 33; input_dev->evbit[0] |= BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); input_dev->relbit[0] |= BIT_MASK(REL_X) | BIT_MASK(REL_Y); input_dev->absbit[0] |= BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) | BIT_MASK(ABS_THROTTLE) | BIT_MASK(ABS_RUDDER) | BIT_MASK(ABS_HAT0X) | BIT_MASK(ABS_HAT0Y) | BIT_MASK(ABS_HAT1X) | BIT_MASK(ABS_HAT1Y); input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA); input_dev->keybit[BIT_WORD(BTN_JOYSTICK)] |= BIT_MASK(BTN_TRIGGER) | BIT_MASK(BTN_THUMB) | BIT_MASK(BTN_TOP) | BIT_MASK(BTN_PINKIE); a3d_read(a3d, data); for (i = 0; i < 4; i++) { if (i < 2) input_set_abs_params(input_dev, axes[i], 48, input_abs_get_val(input_dev, axes[i]) * 2 - 48, 0, 8); else input_set_abs_params(input_dev, axes[i], 2, 253, 0, 0); input_set_abs_params(input_dev, ABS_HAT0X + i, -1, 1, 0, 0); } } else { a3d->length = 29; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); input_dev->relbit[0] |= BIT_MASK(REL_X) | BIT_MASK(REL_Y); input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE); a3d_read(a3d, data); if (!(a3d->adc = adc = gameport_allocate_port())) printk(KERN_ERR "a3d: Not enough memory for ADC port\n"); else { adc->port_data = a3d; adc->open = a3d_adc_open; adc->close = a3d_adc_close; adc->cooked_read = a3d_adc_cooked_read; adc->fuzz = 1; gameport_set_name(adc, a3d_names[a3d->mode]); gameport_set_phys(adc, "%s/gameport0", gameport->phys); adc->dev.parent = &gameport->dev; gameport_register_port(adc); } } err = input_register_device(a3d->dev); if (err) goto fail3; return 0; fail3: if (a3d->adc) 
gameport_unregister_port(a3d->adc); fail2: gameport_close(gameport); fail1: gameport_set_drvdata(gameport, NULL); input_free_device(input_dev); kfree(a3d); return err; } static void a3d_disconnect(struct gameport *gameport) { struct a3d *a3d = gameport_get_drvdata(gameport); input_unregister_device(a3d->dev); if (a3d->adc) gameport_unregister_port(a3d->adc); gameport_close(gameport); gameport_set_drvdata(gameport, NULL); kfree(a3d); } static struct gameport_driver a3d_drv = { .driver = { .name = "adc", .owner = THIS_MODULE, }, .description = DRIVER_DESC, .connect = a3d_connect, .disconnect = a3d_disconnect, }; static int __init a3d_init(void) { return gameport_register_driver(&a3d_drv); } static void __exit a3d_exit(void) { gameport_unregister_driver(&a3d_drv); } module_init(a3d_init); module_exit(a3d_exit);
gpl-2.0
leyarx/android_kernel_wexler_qc750
net/sched/sch_blackhole.c
14137
1240
/* * net/sched/sch_blackhole.c Black hole queue * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * * Note: Quantum tunneling is not supported. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch) { qdisc_drop(skb, sch); return NET_XMIT_SUCCESS; } static struct sk_buff *blackhole_dequeue(struct Qdisc *sch) { return NULL; } static struct Qdisc_ops blackhole_qdisc_ops __read_mostly = { .id = "blackhole", .priv_size = 0, .enqueue = blackhole_enqueue, .dequeue = blackhole_dequeue, .peek = blackhole_dequeue, .owner = THIS_MODULE, }; static int __init blackhole_module_init(void) { return register_qdisc(&blackhole_qdisc_ops); } static void __exit blackhole_module_exit(void) { unregister_qdisc(&blackhole_qdisc_ops); } module_init(blackhole_module_init) module_exit(blackhole_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
ntrdma/ntrdma
drivers/tty/moxa.c
58
53683
/*****************************************************************************/ /* * moxa.c -- MOXA Intellio family multiport serial driver. * * Copyright (C) 1999-2000 Moxa Technologies (support@moxa.com). * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * * This code is loosely based on the Linux serial driver, written by * Linus Torvalds, Theodore T'so and others. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* * MOXA Intellio Series Driver * for : LINUX * date : 1999/1/7 * version : 5.1 */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/serial.h> #include <linux/tty_driver.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <asm/io.h> #include <linux/uaccess.h> #include "moxa.h" #define MOXA_VERSION "6.0k" #define MOXA_FW_HDRLEN 32 #define MOXAMAJOR 172 #define MAX_BOARDS 4 /* Don't change this value */ #define MAX_PORTS_PER_BOARD 32 /* Don't change this value */ #define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD) #define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \ (brd)->boardType == MOXA_BOARD_C320_PCI) /* * Define the Moxa PCI vendor and device IDs. 
*/ #define MOXA_BUS_TYPE_ISA 0 #define MOXA_BUS_TYPE_PCI 1 enum { MOXA_BOARD_C218_PCI = 1, MOXA_BOARD_C218_ISA, MOXA_BOARD_C320_PCI, MOXA_BOARD_C320_ISA, MOXA_BOARD_CP204J, }; static char *moxa_brdname[] = { "C218 Turbo PCI series", "C218 Turbo ISA series", "C320 Turbo PCI series", "C320 Turbo ISA series", "CP-204J series", }; #ifdef CONFIG_PCI static struct pci_device_id moxa_pcibrds[] = { { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C218), .driver_data = MOXA_BOARD_C218_PCI }, { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C320), .driver_data = MOXA_BOARD_C320_PCI }, { PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP204J), .driver_data = MOXA_BOARD_CP204J }, { 0 } }; MODULE_DEVICE_TABLE(pci, moxa_pcibrds); #endif /* CONFIG_PCI */ struct moxa_port; static struct moxa_board_conf { int boardType; int numPorts; int busType; unsigned int ready; struct moxa_port *ports; void __iomem *basemem; void __iomem *intNdx; void __iomem *intPend; void __iomem *intTable; } moxa_boards[MAX_BOARDS]; struct mxser_mstatus { tcflag_t cflag; int cts; int dsr; int ri; int dcd; }; struct moxaq_str { int inq; int outq; }; struct moxa_port { struct tty_port port; struct moxa_board_conf *board; void __iomem *tableAddr; int type; int cflag; unsigned long statusflags; u8 DCDState; /* Protected by the port lock */ u8 lineCtrl; u8 lowChkFlag; }; struct mon_str { int tick; int rxcnt[MAX_PORTS]; int txcnt[MAX_PORTS]; }; /* statusflags */ #define TXSTOPPED 1 #define LOWWAIT 2 #define EMPTYWAIT 3 #define WAKEUP_CHARS 256 static int ttymajor = MOXAMAJOR; static struct mon_str moxaLog; static unsigned int moxaFuncTout = HZ / 2; static unsigned int moxaLowWaterChk; static DEFINE_MUTEX(moxa_openlock); static DEFINE_SPINLOCK(moxa_lock); static unsigned long baseaddr[MAX_BOARDS]; static unsigned int type[MAX_BOARDS]; static unsigned int numports[MAX_BOARDS]; static struct tty_port moxa_service_port; MODULE_AUTHOR("William Chen"); MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board 
Device Driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("c218tunx.cod"); MODULE_FIRMWARE("cp204unx.cod"); MODULE_FIRMWARE("c320tunx.cod"); module_param_array(type, uint, NULL, 0); MODULE_PARM_DESC(type, "card type: C218=2, C320=4"); module_param_array(baseaddr, ulong, NULL, 0); MODULE_PARM_DESC(baseaddr, "base address"); module_param_array(numports, uint, NULL, 0); MODULE_PARM_DESC(numports, "numports (ignored for C218)"); module_param(ttymajor, int, 0); /* * static functions: */ static int moxa_open(struct tty_struct *, struct file *); static void moxa_close(struct tty_struct *, struct file *); static int moxa_write(struct tty_struct *, const unsigned char *, int); static int moxa_write_room(struct tty_struct *); static void moxa_flush_buffer(struct tty_struct *); static int moxa_chars_in_buffer(struct tty_struct *); static void moxa_set_termios(struct tty_struct *, struct ktermios *); static void moxa_stop(struct tty_struct *); static void moxa_start(struct tty_struct *); static void moxa_hangup(struct tty_struct *); static int moxa_tiocmget(struct tty_struct *tty); static int moxa_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static void moxa_poll(unsigned long); static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); static void moxa_shutdown(struct tty_port *); static int moxa_carrier_raised(struct tty_port *); static void moxa_dtr_rts(struct tty_port *, int); /* * moxa board interface functions: */ static void MoxaPortEnable(struct moxa_port *); static void MoxaPortDisable(struct moxa_port *); static int MoxaPortSetTermio(struct moxa_port *, struct ktermios *, speed_t); static int MoxaPortGetLineOut(struct moxa_port *, int *, int *); static void MoxaPortLineCtrl(struct moxa_port *, int, int); static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int); static int MoxaPortLineStatus(struct moxa_port *); static void MoxaPortFlushData(struct moxa_port *, int); static int MoxaPortWriteData(struct tty_struct 
*, const unsigned char *, int); static int MoxaPortReadData(struct moxa_port *); static int MoxaPortTxQueue(struct moxa_port *); static int MoxaPortRxQueue(struct moxa_port *); static int MoxaPortTxFree(struct moxa_port *); static void MoxaPortTxDisable(struct moxa_port *); static void MoxaPortTxEnable(struct moxa_port *); static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *); static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *); static void MoxaSetFifo(struct moxa_port *port, int enable); /* * I/O functions */ static DEFINE_SPINLOCK(moxafunc_lock); static void moxa_wait_finish(void __iomem *ofsAddr) { unsigned long end = jiffies + moxaFuncTout; while (readw(ofsAddr + FuncCode) != 0) if (time_after(jiffies, end)) return; if (readw(ofsAddr + FuncCode) != 0) printk_ratelimited(KERN_WARNING "moxa function expired\n"); } static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg) { unsigned long flags; spin_lock_irqsave(&moxafunc_lock, flags); writew(arg, ofsAddr + FuncArg); writew(cmd, ofsAddr + FuncCode); moxa_wait_finish(ofsAddr); spin_unlock_irqrestore(&moxafunc_lock, flags); } static int moxafuncret(void __iomem *ofsAddr, u16 cmd, u16 arg) { unsigned long flags; u16 ret; spin_lock_irqsave(&moxafunc_lock, flags); writew(arg, ofsAddr + FuncArg); writew(cmd, ofsAddr + FuncCode); moxa_wait_finish(ofsAddr); ret = readw(ofsAddr + FuncArg); spin_unlock_irqrestore(&moxafunc_lock, flags); return ret; } static void moxa_low_water_check(void __iomem *ofsAddr) { u16 rptr, wptr, mask, len; if (readb(ofsAddr + FlagStat) & Xoff_state) { rptr = readw(ofsAddr + RXrptr); wptr = readw(ofsAddr + RXwptr); mask = readw(ofsAddr + RX_mask); len = (wptr - rptr) & mask; if (len <= Low_water) moxafunc(ofsAddr, FC_SendXon, 0); } } /* * TTY operations */ static int moxa_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct moxa_port *ch = tty->driver_data; void __user *argp = (void __user *)arg; int status, ret 
= 0; if (tty->index == MAX_PORTS) { if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE && cmd != MOXA_GETMSTATUS) return -EINVAL; } else if (!ch) return -ENODEV; switch (cmd) { case MOXA_GETDATACOUNT: moxaLog.tick = jiffies; if (copy_to_user(argp, &moxaLog, sizeof(moxaLog))) ret = -EFAULT; break; case MOXA_FLUSH_QUEUE: MoxaPortFlushData(ch, arg); break; case MOXA_GET_IOQUEUE: { struct moxaq_str __user *argm = argp; struct moxaq_str tmp; struct moxa_port *p; unsigned int i, j; for (i = 0; i < MAX_BOARDS; i++) { p = moxa_boards[i].ports; for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) { memset(&tmp, 0, sizeof(tmp)); spin_lock_bh(&moxa_lock); if (moxa_boards[i].ready) { tmp.inq = MoxaPortRxQueue(p); tmp.outq = MoxaPortTxQueue(p); } spin_unlock_bh(&moxa_lock); if (copy_to_user(argm, &tmp, sizeof(tmp))) return -EFAULT; } } break; } case MOXA_GET_OQUEUE: status = MoxaPortTxQueue(ch); ret = put_user(status, (unsigned long __user *)argp); break; case MOXA_GET_IQUEUE: status = MoxaPortRxQueue(ch); ret = put_user(status, (unsigned long __user *)argp); break; case MOXA_GETMSTATUS: { struct mxser_mstatus __user *argm = argp; struct mxser_mstatus tmp; struct moxa_port *p; unsigned int i, j; for (i = 0; i < MAX_BOARDS; i++) { p = moxa_boards[i].ports; for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) { struct tty_struct *ttyp; memset(&tmp, 0, sizeof(tmp)); spin_lock_bh(&moxa_lock); if (!moxa_boards[i].ready) { spin_unlock_bh(&moxa_lock); goto copy; } status = MoxaPortLineStatus(p); spin_unlock_bh(&moxa_lock); if (status & 1) tmp.cts = 1; if (status & 2) tmp.dsr = 1; if (status & 4) tmp.dcd = 1; ttyp = tty_port_tty_get(&p->port); if (!ttyp) tmp.cflag = p->cflag; else tmp.cflag = ttyp->termios.c_cflag; tty_kref_put(ttyp); copy: if (copy_to_user(argm, &tmp, sizeof(tmp))) return -EFAULT; } } break; } case TIOCGSERIAL: mutex_lock(&ch->port.mutex); ret = moxa_get_serial_info(ch, argp); mutex_unlock(&ch->port.mutex); break; case TIOCSSERIAL: 
mutex_lock(&ch->port.mutex); ret = moxa_set_serial_info(ch, argp); mutex_unlock(&ch->port.mutex); break; default: ret = -ENOIOCTLCMD; } return ret; } static int moxa_break_ctl(struct tty_struct *tty, int state) { struct moxa_port *port = tty->driver_data; moxafunc(port->tableAddr, state ? FC_SendBreak : FC_StopBreak, Magic_code); return 0; } static const struct tty_operations moxa_ops = { .open = moxa_open, .close = moxa_close, .write = moxa_write, .write_room = moxa_write_room, .flush_buffer = moxa_flush_buffer, .chars_in_buffer = moxa_chars_in_buffer, .ioctl = moxa_ioctl, .set_termios = moxa_set_termios, .stop = moxa_stop, .start = moxa_start, .hangup = moxa_hangup, .break_ctl = moxa_break_ctl, .tiocmget = moxa_tiocmget, .tiocmset = moxa_tiocmset, }; static const struct tty_port_operations moxa_port_ops = { .carrier_raised = moxa_carrier_raised, .dtr_rts = moxa_dtr_rts, .shutdown = moxa_shutdown, }; static struct tty_driver *moxaDriver; static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); /* * HW init */ static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model) { switch (brd->boardType) { case MOXA_BOARD_C218_ISA: case MOXA_BOARD_C218_PCI: if (model != 1) goto err; break; case MOXA_BOARD_CP204J: if (model != 3) goto err; break; default: if (model != 2) goto err; break; } return 0; err: return -EINVAL; } static int moxa_check_fw(const void *ptr) { const __le16 *lptr = ptr; if (*lptr != cpu_to_le16(0x7980)) return -EINVAL; return 0; } static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf, size_t len) { void __iomem *baseAddr = brd->basemem; u16 tmp; writeb(HW_reset, baseAddr + Control_reg); /* reset */ msleep(10); memset_io(baseAddr, 0, 4096); memcpy_toio(baseAddr, buf, len); /* download BIOS */ writeb(0, baseAddr + Control_reg); /* restart */ msleep(2000); switch (brd->boardType) { case MOXA_BOARD_C218_ISA: case MOXA_BOARD_C218_PCI: tmp = readw(baseAddr + C218_key); if (tmp != C218_KeyCode) goto err; break; case MOXA_BOARD_CP204J: tmp = 
readw(baseAddr + C218_key); if (tmp != CP204J_KeyCode) goto err; break; default: tmp = readw(baseAddr + C320_key); if (tmp != C320_KeyCode) goto err; tmp = readw(baseAddr + C320_status); if (tmp != STS_init) { printk(KERN_ERR "MOXA: bios upload failed -- CPU/Basic " "module not found\n"); return -EIO; } break; } return 0; err: printk(KERN_ERR "MOXA: bios upload failed -- board not found\n"); return -EIO; } static int moxa_load_320b(struct moxa_board_conf *brd, const u8 *ptr, size_t len) { void __iomem *baseAddr = brd->basemem; if (len < 7168) { printk(KERN_ERR "MOXA: invalid 320 bios -- too short\n"); return -EINVAL; } writew(len - 7168 - 2, baseAddr + C320bapi_len); writeb(1, baseAddr + Control_reg); /* Select Page 1 */ memcpy_toio(baseAddr + DynPage_addr, ptr, 7168); writeb(2, baseAddr + Control_reg); /* Select Page 2 */ memcpy_toio(baseAddr + DynPage_addr, ptr + 7168, len - 7168); return 0; } static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr, size_t len) { void __iomem *baseAddr = brd->basemem; const __le16 *uptr = ptr; size_t wlen, len2, j; unsigned long key, loadbuf, loadlen, checksum, checksum_ok; unsigned int i, retry; u16 usum, keycode; keycode = (brd->boardType == MOXA_BOARD_CP204J) ? CP204J_KeyCode : C218_KeyCode; switch (brd->boardType) { case MOXA_BOARD_CP204J: case MOXA_BOARD_C218_ISA: case MOXA_BOARD_C218_PCI: key = C218_key; loadbuf = C218_LoadBuf; loadlen = C218DLoad_len; checksum = C218check_sum; checksum_ok = C218chksum_ok; break; default: key = C320_key; keycode = C320_KeyCode; loadbuf = C320_LoadBuf; loadlen = C320DLoad_len; checksum = C320check_sum; checksum_ok = C320chksum_ok; break; } usum = 0; wlen = len >> 1; for (i = 0; i < wlen; i++) usum += le16_to_cpu(uptr[i]); retry = 0; do { wlen = len >> 1; j = 0; while (wlen) { len2 = (wlen > 2048) ? 
2048 : wlen; wlen -= len2; memcpy_toio(baseAddr + loadbuf, ptr + j, len2 << 1); j += len2 << 1; writew(len2, baseAddr + loadlen); writew(0, baseAddr + key); for (i = 0; i < 100; i++) { if (readw(baseAddr + key) == keycode) break; msleep(10); } if (readw(baseAddr + key) != keycode) return -EIO; } writew(0, baseAddr + loadlen); writew(usum, baseAddr + checksum); writew(0, baseAddr + key); for (i = 0; i < 100; i++) { if (readw(baseAddr + key) == keycode) break; msleep(10); } retry++; } while ((readb(baseAddr + checksum_ok) != 1) && (retry < 3)); if (readb(baseAddr + checksum_ok) != 1) return -EIO; writew(0, baseAddr + key); for (i = 0; i < 600; i++) { if (readw(baseAddr + Magic_no) == Magic_code) break; msleep(10); } if (readw(baseAddr + Magic_no) != Magic_code) return -EIO; if (MOXA_IS_320(brd)) { if (brd->busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */ writew(0x3800, baseAddr + TMS320_PORT1); writew(0x3900, baseAddr + TMS320_PORT2); writew(28499, baseAddr + TMS320_CLOCK); } else { writew(0x3200, baseAddr + TMS320_PORT1); writew(0x3400, baseAddr + TMS320_PORT2); writew(19999, baseAddr + TMS320_CLOCK); } } writew(1, baseAddr + Disable_IRQ); writew(0, baseAddr + Magic_no); for (i = 0; i < 500; i++) { if (readw(baseAddr + Magic_no) == Magic_code) break; msleep(10); } if (readw(baseAddr + Magic_no) != Magic_code) return -EIO; if (MOXA_IS_320(brd)) { j = readw(baseAddr + Module_cnt); if (j <= 0) return -EIO; brd->numPorts = j * 8; writew(j, baseAddr + Module_no); writew(0, baseAddr + Magic_no); for (i = 0; i < 600; i++) { if (readw(baseAddr + Magic_no) == Magic_code) break; msleep(10); } if (readw(baseAddr + Magic_no) != Magic_code) return -EIO; } brd->intNdx = baseAddr + IRQindex; brd->intPend = baseAddr + IRQpending; brd->intTable = baseAddr + IRQtable; return 0; } static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr, size_t len) { void __iomem *ofsAddr, *baseAddr = brd->basemem; struct moxa_port *port; int retval, i; if (len % 2) { printk(KERN_ERR 
"MOXA: bios length is not even\n"); return -EINVAL; } retval = moxa_real_load_code(brd, ptr, len); /* may change numPorts */ if (retval) return retval; switch (brd->boardType) { case MOXA_BOARD_C218_ISA: case MOXA_BOARD_C218_PCI: case MOXA_BOARD_CP204J: port = brd->ports; for (i = 0; i < brd->numPorts; i++, port++) { port->board = brd; port->DCDState = 0; port->tableAddr = baseAddr + Extern_table + Extern_size * i; ofsAddr = port->tableAddr; writew(C218rx_mask, ofsAddr + RX_mask); writew(C218tx_mask, ofsAddr + TX_mask); writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb); writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb); writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb); writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb); } break; default: port = brd->ports; for (i = 0; i < brd->numPorts; i++, port++) { port->board = brd; port->DCDState = 0; port->tableAddr = baseAddr + Extern_table + Extern_size * i; ofsAddr = port->tableAddr; switch (brd->numPorts) { case 8: writew(C320p8rx_mask, ofsAddr + RX_mask); writew(C320p8tx_mask, ofsAddr + TX_mask); writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb); writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb); writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb); writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb); break; case 16: writew(C320p16rx_mask, ofsAddr + RX_mask); writew(C320p16tx_mask, ofsAddr + TX_mask); writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb); writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb); writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb); writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb); break; case 24: writew(C320p24rx_mask, ofsAddr + RX_mask); writew(C320p24tx_mask, ofsAddr + TX_mask); writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb); writew(readw(ofsAddr + Page_rxb) + 
C320p24rx_pgno, ofsAddr + EndPage_rxb); writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb); writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); break; case 32: writew(C320p32rx_mask, ofsAddr + RX_mask); writew(C320p32tx_mask, ofsAddr + TX_mask); writew(C320p32tx_ofs, ofsAddr + Ofs_txb); writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb); writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb); writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb); writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); break; } } break; } return 0; } static int moxa_load_fw(struct moxa_board_conf *brd, const struct firmware *fw) { const void *ptr = fw->data; char rsn[64]; u16 lens[5]; size_t len; unsigned int a, lenp, lencnt; int ret = -EINVAL; struct { __le32 magic; /* 0x34303430 */ u8 reserved1[2]; u8 type; /* UNIX = 3 */ u8 model; /* C218T=1, C320T=2, CP204=3 */ u8 reserved2[8]; __le16 len[5]; } const *hdr = ptr; BUILD_BUG_ON(ARRAY_SIZE(hdr->len) != ARRAY_SIZE(lens)); if (fw->size < MOXA_FW_HDRLEN) { strcpy(rsn, "too short (even header won't fit)"); goto err; } if (hdr->magic != cpu_to_le32(0x30343034)) { sprintf(rsn, "bad magic: %.8x", le32_to_cpu(hdr->magic)); goto err; } if (hdr->type != 3) { sprintf(rsn, "not for linux, type is %u", hdr->type); goto err; } if (moxa_check_fw_model(brd, hdr->model)) { sprintf(rsn, "not for this card, model is %u", hdr->model); goto err; } len = MOXA_FW_HDRLEN; lencnt = hdr->model == 2 ? 
5 : 3; for (a = 0; a < ARRAY_SIZE(lens); a++) { lens[a] = le16_to_cpu(hdr->len[a]); if (lens[a] && len + lens[a] <= fw->size && moxa_check_fw(&fw->data[len])) printk(KERN_WARNING "MOXA firmware: unexpected input " "at offset %u, but going on\n", (u32)len); if (!lens[a] && a < lencnt) { sprintf(rsn, "too few entries in fw file"); goto err; } len += lens[a]; } if (len != fw->size) { sprintf(rsn, "bad length: %u (should be %u)", (u32)fw->size, (u32)len); goto err; } ptr += MOXA_FW_HDRLEN; lenp = 0; /* bios */ strcpy(rsn, "read above"); ret = moxa_load_bios(brd, ptr, lens[lenp]); if (ret) goto err; /* we skip the tty section (lens[1]), since we don't need it */ ptr += lens[lenp] + lens[lenp + 1]; lenp += 2; /* comm */ if (hdr->model == 2) { ret = moxa_load_320b(brd, ptr, lens[lenp]); if (ret) goto err; /* skip another tty */ ptr += lens[lenp] + lens[lenp + 1]; lenp += 2; } ret = moxa_load_code(brd, ptr, lens[lenp]); if (ret) goto err; return 0; err: printk(KERN_ERR "firmware failed to load, reason: %s\n", rsn); return ret; } static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev) { const struct firmware *fw; const char *file; struct moxa_port *p; unsigned int i, first_idx; int ret; brd->ports = kcalloc(MAX_PORTS_PER_BOARD, sizeof(*brd->ports), GFP_KERNEL); if (brd->ports == NULL) { printk(KERN_ERR "cannot allocate memory for ports\n"); ret = -ENOMEM; goto err; } for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) { tty_port_init(&p->port); p->port.ops = &moxa_port_ops; p->type = PORT_16550A; p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL; } switch (brd->boardType) { case MOXA_BOARD_C218_ISA: case MOXA_BOARD_C218_PCI: file = "c218tunx.cod"; break; case MOXA_BOARD_CP204J: file = "cp204unx.cod"; break; default: file = "c320tunx.cod"; break; } ret = request_firmware(&fw, file, dev); if (ret) { printk(KERN_ERR "MOXA: request_firmware failed. Make sure " "you've placed '%s' file into your firmware " "loader directory (e.g. 
/lib/firmware)\n", file); goto err_free; } ret = moxa_load_fw(brd, fw); release_firmware(fw); if (ret) goto err_free; spin_lock_bh(&moxa_lock); brd->ready = 1; if (!timer_pending(&moxaTimer)) mod_timer(&moxaTimer, jiffies + HZ / 50); spin_unlock_bh(&moxa_lock); first_idx = (brd - moxa_boards) * MAX_PORTS_PER_BOARD; for (i = 0; i < brd->numPorts; i++) tty_port_register_device(&brd->ports[i].port, moxaDriver, first_idx + i, dev); return 0; err_free: for (i = 0; i < MAX_PORTS_PER_BOARD; i++) tty_port_destroy(&brd->ports[i].port); kfree(brd->ports); err: return ret; } static void moxa_board_deinit(struct moxa_board_conf *brd) { unsigned int a, opened, first_idx; mutex_lock(&moxa_openlock); spin_lock_bh(&moxa_lock); brd->ready = 0; spin_unlock_bh(&moxa_lock); /* pci hot-un-plug support */ for (a = 0; a < brd->numPorts; a++) if (tty_port_initialized(&brd->ports[a].port)) tty_port_tty_hangup(&brd->ports[a].port, false); for (a = 0; a < MAX_PORTS_PER_BOARD; a++) tty_port_destroy(&brd->ports[a].port); while (1) { opened = 0; for (a = 0; a < brd->numPorts; a++) if (tty_port_initialized(&brd->ports[a].port)) opened++; mutex_unlock(&moxa_openlock); if (!opened) break; msleep(50); mutex_lock(&moxa_openlock); } first_idx = (brd - moxa_boards) * MAX_PORTS_PER_BOARD; for (a = 0; a < brd->numPorts; a++) tty_unregister_device(moxaDriver, first_idx + a); iounmap(brd->basemem); brd->basemem = NULL; kfree(brd->ports); } #ifdef CONFIG_PCI static int moxa_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct moxa_board_conf *board; unsigned int i; int board_type = ent->driver_data; int retval; retval = pci_enable_device(pdev); if (retval) { dev_err(&pdev->dev, "can't enable pci device\n"); goto err; } for (i = 0; i < MAX_BOARDS; i++) if (moxa_boards[i].basemem == NULL) break; retval = -ENODEV; if (i >= MAX_BOARDS) { dev_warn(&pdev->dev, "more than %u MOXA Intellio family boards " "found. 
Board is ignored.\n", MAX_BOARDS); goto err; } board = &moxa_boards[i]; retval = pci_request_region(pdev, 2, "moxa-base"); if (retval) { dev_err(&pdev->dev, "can't request pci region 2\n"); goto err; } board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000); if (board->basemem == NULL) { dev_err(&pdev->dev, "can't remap io space 2\n"); retval = -ENOMEM; goto err_reg; } board->boardType = board_type; switch (board_type) { case MOXA_BOARD_C218_ISA: case MOXA_BOARD_C218_PCI: board->numPorts = 8; break; case MOXA_BOARD_CP204J: board->numPorts = 4; break; default: board->numPorts = 0; break; } board->busType = MOXA_BUS_TYPE_PCI; retval = moxa_init_board(board, &pdev->dev); if (retval) goto err_base; pci_set_drvdata(pdev, board); dev_info(&pdev->dev, "board '%s' ready (%u ports, firmware loaded)\n", moxa_brdname[board_type - 1], board->numPorts); return 0; err_base: iounmap(board->basemem); board->basemem = NULL; err_reg: pci_release_region(pdev, 2); err: return retval; } static void moxa_pci_remove(struct pci_dev *pdev) { struct moxa_board_conf *brd = pci_get_drvdata(pdev); moxa_board_deinit(brd); pci_release_region(pdev, 2); } static struct pci_driver moxa_pci_driver = { .name = "moxa", .id_table = moxa_pcibrds, .probe = moxa_pci_probe, .remove = moxa_pci_remove }; #endif /* CONFIG_PCI */ static int __init moxa_init(void) { unsigned int isabrds = 0; int retval = 0; struct moxa_board_conf *brd = moxa_boards; unsigned int i; printk(KERN_INFO "MOXA Intellio family driver version %s\n", MOXA_VERSION); tty_port_init(&moxa_service_port); moxaDriver = tty_alloc_driver(MAX_PORTS + 1, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); if (IS_ERR(moxaDriver)) return PTR_ERR(moxaDriver); moxaDriver->name = "ttyMX"; moxaDriver->major = ttymajor; moxaDriver->minor_start = 0; moxaDriver->type = TTY_DRIVER_TYPE_SERIAL; moxaDriver->subtype = SERIAL_TYPE_NORMAL; moxaDriver->init_termios = tty_std_termios; moxaDriver->init_termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL | 
HUPCL; moxaDriver->init_termios.c_ispeed = 9600; moxaDriver->init_termios.c_ospeed = 9600; tty_set_operations(moxaDriver, &moxa_ops); /* Having one more port only for ioctls is ugly */ tty_port_link_device(&moxa_service_port, moxaDriver, MAX_PORTS); if (tty_register_driver(moxaDriver)) { printk(KERN_ERR "can't register MOXA Smartio tty driver!\n"); put_tty_driver(moxaDriver); return -1; } /* Find the boards defined from module args. */ for (i = 0; i < MAX_BOARDS; i++) { if (!baseaddr[i]) break; if (type[i] == MOXA_BOARD_C218_ISA || type[i] == MOXA_BOARD_C320_ISA) { pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n", isabrds + 1, moxa_brdname[type[i] - 1], baseaddr[i]); brd->boardType = type[i]; brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 : numports[i]; brd->busType = MOXA_BUS_TYPE_ISA; brd->basemem = ioremap_nocache(baseaddr[i], 0x4000); if (!brd->basemem) { printk(KERN_ERR "MOXA: can't remap %lx\n", baseaddr[i]); continue; } if (moxa_init_board(brd, NULL)) { iounmap(brd->basemem); brd->basemem = NULL; continue; } printk(KERN_INFO "MOXA isa board found at 0x%.8lx and " "ready (%u ports, firmware loaded)\n", baseaddr[i], brd->numPorts); brd++; isabrds++; } } #ifdef CONFIG_PCI retval = pci_register_driver(&moxa_pci_driver); if (retval) { printk(KERN_ERR "Can't register MOXA pci driver!\n"); if (isabrds) retval = 0; } #endif return retval; } static void __exit moxa_exit(void) { unsigned int i; #ifdef CONFIG_PCI pci_unregister_driver(&moxa_pci_driver); #endif for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */ if (moxa_boards[i].ready) moxa_board_deinit(&moxa_boards[i]); del_timer_sync(&moxaTimer); if (tty_unregister_driver(moxaDriver)) printk(KERN_ERR "Couldn't unregister MOXA Intellio family " "serial driver\n"); put_tty_driver(moxaDriver); } module_init(moxa_init); module_exit(moxa_exit); static void moxa_shutdown(struct tty_port *port) { struct moxa_port *ch = container_of(port, struct moxa_port, port); MoxaPortDisable(ch); MoxaPortFlushData(ch, 2); } static 
int moxa_carrier_raised(struct tty_port *port)
{
	struct moxa_port *ch = container_of(port, struct moxa_port, port);
	int dcd;

	/*
	 * DCDState is a cached copy of the modem line, updated by
	 * moxa_new_dcdstate() under port->lock; sample it under the same
	 * lock so we never read a half-updated value.
	 */
	spin_lock_irq(&port->lock);
	dcd = ch->DCDState;
	spin_unlock_irq(&port->lock);
	return dcd;
}

/* tty_port callback: raise or drop DTR and RTS together. */
static void moxa_dtr_rts(struct tty_port *port, int onoff)
{
	struct moxa_port *ch = container_of(port, struct moxa_port, port);
	MoxaPortLineCtrl(ch, onoff, onoff);
}

/*
 * Open one MOXA port.  Index MAX_PORTS is the extra "service" port that
 * exists only for ioctls (see moxa_init) and is restricted to
 * CAP_SYS_ADMIN.  First open of a real port programs line parameters,
 * raises DTR/RTS and enables the channel before blocking for carrier.
 */
static int moxa_open(struct tty_struct *tty, struct file *filp)
{
	struct moxa_board_conf *brd;
	struct moxa_port *ch;
	int port;

	port = tty->index;
	if (port == MAX_PORTS) {
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
	}
	if (mutex_lock_interruptible(&moxa_openlock))
		return -ERESTARTSYS;

	/* Board must be probed and the port index within its real range. */
	brd = &moxa_boards[port / MAX_PORTS_PER_BOARD];
	if (!brd->ready) {
		mutex_unlock(&moxa_openlock);
		return -ENODEV;
	}

	if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
		mutex_unlock(&moxa_openlock);
		return -ENODEV;
	}

	ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
	ch->port.count++;
	tty->driver_data = ch;
	tty_port_tty_set(&ch->port, tty);

	/* One-time hardware initialization on the first open of this port. */
	mutex_lock(&ch->port.mutex);
	if (!tty_port_initialized(&ch->port)) {
		ch->statusflags = 0;
		moxa_set_tty_param(tty, &tty->termios);
		MoxaPortLineCtrl(ch, 1, 1);	/* DTR and RTS on */
		MoxaPortEnable(ch);
		MoxaSetFifo(ch, ch->type == PORT_16550A);
		tty_port_set_initialized(&ch->port, 1);
	}
	mutex_unlock(&ch->port.mutex);
	mutex_unlock(&moxa_openlock);

	return tty_port_block_til_ready(&ch->port, tty, filp);
}

static void moxa_close(struct tty_struct *tty, struct file *filp)
{
	struct moxa_port *ch = tty->driver_data;

	/* Remember the final cflag; tty_port_close handles the shutdown. */
	ch->cflag = tty->termios.c_cflag;
	tty_port_close(&ch->port, tty, filp);
}

/*
 * Queue data for transmission.  The actual copy into the board's shared
 * memory window happens in MoxaPortWriteData() under moxa_lock; LOWWAIT
 * is set so the poll timer (moxa_poll_port) wakes the writer once the
 * transmit queue drains below WAKEUP_CHARS.
 */
static int moxa_write(struct tty_struct *tty,
		      const unsigned char *buf, int count)
{
	struct moxa_port *ch = tty->driver_data;
	unsigned long flags;
	int len;

	if (ch == NULL)
		return 0;

	spin_lock_irqsave(&moxa_lock, flags);
	len = MoxaPortWriteData(tty, buf, count);
	spin_unlock_irqrestore(&moxa_lock, flags);

	set_bit(LOWWAIT, &ch->statusflags);
	return len;
}

/* Report free transmit space (0 while stopped; body continues below). */
static int moxa_write_room(struct tty_struct *tty)
{
	struct moxa_port *ch;

	if
(tty->stopped) return 0; ch = tty->driver_data; if (ch == NULL) return 0; return MoxaPortTxFree(ch); } static void moxa_flush_buffer(struct tty_struct *tty) { struct moxa_port *ch = tty->driver_data; if (ch == NULL) return; MoxaPortFlushData(ch, 1); tty_wakeup(tty); } static int moxa_chars_in_buffer(struct tty_struct *tty) { struct moxa_port *ch = tty->driver_data; int chars; chars = MoxaPortTxQueue(ch); if (chars) /* * Make it possible to wakeup anything waiting for output * in tty_ioctl.c, etc. */ set_bit(EMPTYWAIT, &ch->statusflags); return chars; } static int moxa_tiocmget(struct tty_struct *tty) { struct moxa_port *ch = tty->driver_data; int flag = 0, dtr, rts; MoxaPortGetLineOut(ch, &dtr, &rts); if (dtr) flag |= TIOCM_DTR; if (rts) flag |= TIOCM_RTS; dtr = MoxaPortLineStatus(ch); if (dtr & 1) flag |= TIOCM_CTS; if (dtr & 2) flag |= TIOCM_DSR; if (dtr & 4) flag |= TIOCM_CD; return flag; } static int moxa_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct moxa_port *ch; int dtr, rts; mutex_lock(&moxa_openlock); ch = tty->driver_data; if (!ch) { mutex_unlock(&moxa_openlock); return -EINVAL; } MoxaPortGetLineOut(ch, &dtr, &rts); if (set & TIOCM_RTS) rts = 1; if (set & TIOCM_DTR) dtr = 1; if (clear & TIOCM_RTS) rts = 0; if (clear & TIOCM_DTR) dtr = 0; MoxaPortLineCtrl(ch, dtr, rts); mutex_unlock(&moxa_openlock); return 0; } static void moxa_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct moxa_port *ch = tty->driver_data; if (ch == NULL) return; moxa_set_tty_param(tty, old_termios); if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty)) wake_up_interruptible(&ch->port.open_wait); } static void moxa_stop(struct tty_struct *tty) { struct moxa_port *ch = tty->driver_data; if (ch == NULL) return; MoxaPortTxDisable(ch); set_bit(TXSTOPPED, &ch->statusflags); } static void moxa_start(struct tty_struct *tty) { struct moxa_port *ch = tty->driver_data; if (ch == NULL) return; if (!test_bit(TXSTOPPED, &ch->statusflags)) 
return; MoxaPortTxEnable(ch); clear_bit(TXSTOPPED, &ch->statusflags); } static void moxa_hangup(struct tty_struct *tty) { struct moxa_port *ch = tty->driver_data; tty_port_hangup(&ch->port); } static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd) { unsigned long flags; dcd = !!dcd; spin_lock_irqsave(&p->port.lock, flags); if (dcd != p->DCDState) { p->DCDState = dcd; spin_unlock_irqrestore(&p->port.lock, flags); if (!dcd) tty_port_tty_hangup(&p->port, true); } else spin_unlock_irqrestore(&p->port.lock, flags); } static int moxa_poll_port(struct moxa_port *p, unsigned int handle, u16 __iomem *ip) { struct tty_struct *tty = tty_port_tty_get(&p->port); void __iomem *ofsAddr; unsigned int inited = tty_port_initialized(&p->port); u16 intr; if (tty) { if (test_bit(EMPTYWAIT, &p->statusflags) && MoxaPortTxQueue(p) == 0) { clear_bit(EMPTYWAIT, &p->statusflags); tty_wakeup(tty); } if (test_bit(LOWWAIT, &p->statusflags) && !tty->stopped && MoxaPortTxQueue(p) <= WAKEUP_CHARS) { clear_bit(LOWWAIT, &p->statusflags); tty_wakeup(tty); } if (inited && !tty_throttled(tty) && MoxaPortRxQueue(p) > 0) { /* RX */ MoxaPortReadData(p); tty_schedule_flip(&p->port); } } else { clear_bit(EMPTYWAIT, &p->statusflags); MoxaPortFlushData(p, 0); /* flush RX */ } if (!handle) /* nothing else to do */ goto put; intr = readw(ip); /* port irq status */ if (intr == 0) goto put; writew(0, ip); /* ACK port */ ofsAddr = p->tableAddr; if (intr & IntrTx) /* disable tx intr */ writew(readw(ofsAddr + HostStat) & ~WakeupTx, ofsAddr + HostStat); if (!inited) goto put; if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */ tty_insert_flip_char(&p->port, 0, TTY_BREAK); tty_schedule_flip(&p->port); } if (intr & IntrLine) moxa_new_dcdstate(p, readb(ofsAddr + FlagStat) & DCD_state); put: tty_kref_put(tty); return 0; } static void moxa_poll(unsigned long ignored) { struct moxa_board_conf *brd; u16 __iomem *ip; unsigned int card, port, served = 0; spin_lock(&moxa_lock); for (card = 0; card < MAX_BOARDS; 
card++) { brd = &moxa_boards[card]; if (!brd->ready) continue; served++; ip = NULL; if (readb(brd->intPend) == 0xff) ip = brd->intTable + readb(brd->intNdx); for (port = 0; port < brd->numPorts; port++) moxa_poll_port(&brd->ports[port], !!ip, ip + port); if (ip) writeb(0, brd->intPend); /* ACK */ if (moxaLowWaterChk) { struct moxa_port *p = brd->ports; for (port = 0; port < brd->numPorts; port++, p++) if (p->lowChkFlag) { p->lowChkFlag = 0; moxa_low_water_check(p->tableAddr); } } } moxaLowWaterChk = 0; if (served) mod_timer(&moxaTimer, jiffies + HZ / 50); spin_unlock(&moxa_lock); } /******************************************************************************/ static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios) { register struct ktermios *ts = &tty->termios; struct moxa_port *ch = tty->driver_data; int rts, cts, txflow, rxflow, xany, baud; rts = cts = txflow = rxflow = xany = 0; if (ts->c_cflag & CRTSCTS) rts = cts = 1; if (ts->c_iflag & IXON) txflow = 1; if (ts->c_iflag & IXOFF) rxflow = 1; if (ts->c_iflag & IXANY) xany = 1; /* Clear the features we don't support */ ts->c_cflag &= ~CMSPAR; MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany); baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty)); if (baud == -1) baud = tty_termios_baud_rate(old_termios); /* Not put the baud rate into the termios data */ tty_encode_baud_rate(tty, baud, baud); } /***************************************************************************** * Driver level functions: * *****************************************************************************/ static void MoxaPortFlushData(struct moxa_port *port, int mode) { void __iomem *ofsAddr; if (mode < 0 || mode > 2) return; ofsAddr = port->tableAddr; moxafunc(ofsAddr, FC_FlushQueue, mode); if (mode != 1) { port->lowChkFlag = 0; moxa_low_water_check(ofsAddr); } } /* * Moxa Port Number Description: * * MOXA serial driver supports up to 4 MOXA-C218/C320 boards. 
And,
 * the port numbers used in MOXA driver functions will be 0 to 31 for
 * first MOXA board, 32 to 63 for second, 64 to 95 for third and 96
 * to 127 for fourth. For example, if you setup three MOXA boards,
 * first board is C218, second board is C320-16 and third board is
 * C320-32. The port number of first board (C218 - 8 ports) is from
 * 0 to 7. The port number of second board (C320 - 16 ports) is from
 * 32 to 47. The port number of third board (C320 - 32 ports) is from
 * 64 to 95. And those port numbers from 8 to 31, 48 to 63 and 96 to
 * 127 will be invalid.
 *
 *
 *      Moxa Functions Description:
 *
 *      Function 1:     Driver initialization routine, this routine must be
 *                      called when initializing the driver.
 *      Syntax:
 *      void MoxaDriverInit();
 *
 *
 *      Function 2:     Moxa driver private IOCTL command processing.
 *      Syntax:
 *      int  MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port);
 *
 *           unsigned int cmd   : IOCTL command
 *           unsigned long arg  : IOCTL argument
 *           int port           : port number (0 - 127)
 *
 *           return:            0  (OK)
 *                              -EINVAL
 *                              -ENOIOCTLCMD
 *
 *
 *      Function 6:     Enable this port to start Tx/Rx data.
 *      Syntax:
 *      void MoxaPortEnable(int port);
 *           int port           : port number (0 - 127)
 *
 *
 *      Function 7:     Disable this port
 *      Syntax:
 *      void MoxaPortDisable(int port);
 *           int port           : port number (0 - 127)
 *
 *
 *      Function 10:    Setting baud rate of this port.
 *      Syntax:
 *      speed_t MoxaPortSetBaud(int port, speed_t baud);
 *           int port           : port number (0 - 127)
 *           long baud          : baud rate (50 - 115200)
 *
 *           return:            0  : this port is invalid or baud < 50
 *                              50 - 115200 : the real baud rate set to the port;
 *                                 if the argument baud is larger than the
 *                                 maximum available baud rate, the real baud
 *                                 rate set will be the maximum baud rate.
 *
 *
 *      Function 12:    Configure the port.
 *      Syntax:
 *      int  MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud);
 *           int port           : port number (0 - 127)
 *           struct ktermios * termio : termio structure pointer
 *           speed_t baud       : baud rate
 *
 *           return:            -1 : this port is invalid or termio == NULL
 *                              0  : setting O.K.
 *
 *
 *      Function 13:    Get the DTR/RTS state of this port.
 *      Syntax:
 *      int  MoxaPortGetLineOut(int port, int *dtrState, int *rtsState);
 *           int port           : port number (0 - 127)
 *           int * dtrState     : pointer to INT to receive the current DTR
 *                                state. (if NULL, this function will not
 *                                write to this address)
 *           int * rtsState     : pointer to INT to receive the current RTS
 *                                state. (if NULL, this function will not
 *                                write to this address)
 *
 *           return:            -1 : this port is invalid
 *                              0  : O.K.
 *
 *
 *      Function 14:    Setting the DTR/RTS output state of this port.
 *      Syntax:
 *      void MoxaPortLineCtrl(int port, int dtrState, int rtsState);
 *           int port           : port number (0 - 127)
 *           int dtrState       : DTR output state (0: off, 1: on)
 *           int rtsState       : RTS output state (0: off, 1: on)
 *
 *
 *      Function 15:    Setting the flow control of this port.
 *      Syntax:
 *      void MoxaPortFlowCtrl(int port, int rtsFlow, int ctsFlow, int rxFlow,
 *                            int txFlow,int xany);
 *           int port           : port number (0 - 127)
 *           int rtsFlow        : H/W RTS flow control (0: no, 1: yes)
 *           int ctsFlow        : H/W CTS flow control (0: no, 1: yes)
 *           int rxFlow         : S/W Rx XON/XOFF flow control (0: no, 1: yes)
 *           int txFlow         : S/W Tx XON/XOFF flow control (0: no, 1: yes)
 *           int xany           : S/W XANY flow control (0: no, 1: yes)
 *
 *
 *      Function 16:    Get the line status of this port
 *      Syntax:
 *      int  MoxaPortLineStatus(int port);
 *           int port           : port number (0 - 127)
 *
 *           return:            Bit 0 - CTS state (0: off, 1: on)
 *                              Bit 1 - DSR state (0: off, 1: on)
 *                              Bit 2 - DCD state (0: off, 1: on)
 *
 *
 *      Function 19:    Flush the Rx/Tx buffer data of this port.
* Syntax: * void MoxaPortFlushData(int port, int mode); * int port : port number (0 - 127) * int mode * 0 : flush the Rx buffer * 1 : flush the Tx buffer * 2 : flush the Rx and Tx buffer * * * Function 20: Write data. * Syntax: * int MoxaPortWriteData(int port, unsigned char * buffer, int length); * int port : port number (0 - 127) * unsigned char * buffer : pointer to write data buffer. * int length : write data length * * return: 0 - length : real write data length * * * Function 21: Read data. * Syntax: * int MoxaPortReadData(int port, struct tty_struct *tty); * int port : port number (0 - 127) * struct tty_struct *tty : tty for data * * return: 0 - length : real read data length * * * Function 24: Get the Tx buffer current queued data bytes * Syntax: * int MoxaPortTxQueue(int port); * int port : port number (0 - 127) * * return: .. : Tx buffer current queued data bytes * * * Function 25: Get the Tx buffer current free space * Syntax: * int MoxaPortTxFree(int port); * int port : port number (0 - 127) * * return: .. : Tx buffer current free space * * * Function 26: Get the Rx buffer current queued data bytes * Syntax: * int MoxaPortRxQueue(int port); * int port : port number (0 - 127) * * return: .. : Rx buffer current queued data bytes * * * Function 28: Disable port data transmission. * Syntax: * void MoxaPortTxDisable(int port); * int port : port number (0 - 127) * * * Function 29: Enable port data transmission. * Syntax: * void MoxaPortTxEnable(int port); * int port : port number (0 - 127) * * * Function 31: Get the received BREAK signal count and reset it. * Syntax: * int MoxaPortResetBrkCnt(int port); * int port : port number (0 - 127) * * return: 0 - .. 
: BREAK signal count * * */ static void MoxaPortEnable(struct moxa_port *port) { void __iomem *ofsAddr; u16 lowwater = 512; ofsAddr = port->tableAddr; writew(lowwater, ofsAddr + Low_water); if (MOXA_IS_320(port->board)) moxafunc(ofsAddr, FC_SetBreakIrq, 0); else writew(readw(ofsAddr + HostStat) | WakeupBreak, ofsAddr + HostStat); moxafunc(ofsAddr, FC_SetLineIrq, Magic_code); moxafunc(ofsAddr, FC_FlushQueue, 2); moxafunc(ofsAddr, FC_EnableCH, Magic_code); MoxaPortLineStatus(port); } static void MoxaPortDisable(struct moxa_port *port) { void __iomem *ofsAddr = port->tableAddr; moxafunc(ofsAddr, FC_SetFlowCtl, 0); /* disable flow control */ moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code); writew(0, ofsAddr + HostStat); moxafunc(ofsAddr, FC_DisableCH, Magic_code); } static speed_t MoxaPortSetBaud(struct moxa_port *port, speed_t baud) { void __iomem *ofsAddr = port->tableAddr; unsigned int clock, val; speed_t max; max = MOXA_IS_320(port->board) ? 460800 : 921600; if (baud < 50) return 0; if (baud > max) baud = max; clock = 921600; val = clock / baud; moxafunc(ofsAddr, FC_SetBaud, val); baud = clock / val; return baud; } static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio, speed_t baud) { void __iomem *ofsAddr; tcflag_t mode = 0; ofsAddr = port->tableAddr; mode = termio->c_cflag & CSIZE; if (mode == CS5) mode = MX_CS5; else if (mode == CS6) mode = MX_CS6; else if (mode == CS7) mode = MX_CS7; else if (mode == CS8) mode = MX_CS8; if (termio->c_cflag & CSTOPB) { if (mode == MX_CS5) mode |= MX_STOP15; else mode |= MX_STOP2; } else mode |= MX_STOP1; if (termio->c_cflag & PARENB) { if (termio->c_cflag & PARODD) mode |= MX_PARODD; else mode |= MX_PAREVEN; } else mode |= MX_PARNONE; moxafunc(ofsAddr, FC_SetDataMode, (u16)mode); if (MOXA_IS_320(port->board) && baud >= 921600) return -1; baud = MoxaPortSetBaud(port, baud); if (termio->c_iflag & (IXON | IXOFF | IXANY)) { spin_lock_irq(&moxafunc_lock); writeb(termio->c_cc[VSTART], ofsAddr + FuncArg); 
writeb(termio->c_cc[VSTOP], ofsAddr + FuncArg1); writeb(FC_SetXonXoff, ofsAddr + FuncCode); moxa_wait_finish(ofsAddr); spin_unlock_irq(&moxafunc_lock); } return baud; } static int MoxaPortGetLineOut(struct moxa_port *port, int *dtrState, int *rtsState) { if (dtrState) *dtrState = !!(port->lineCtrl & DTR_ON); if (rtsState) *rtsState = !!(port->lineCtrl & RTS_ON); return 0; } static void MoxaPortLineCtrl(struct moxa_port *port, int dtr, int rts) { u8 mode = 0; if (dtr) mode |= DTR_ON; if (rts) mode |= RTS_ON; port->lineCtrl = mode; moxafunc(port->tableAddr, FC_LineControl, mode); } static void MoxaPortFlowCtrl(struct moxa_port *port, int rts, int cts, int txflow, int rxflow, int txany) { int mode = 0; if (rts) mode |= RTS_FlowCtl; if (cts) mode |= CTS_FlowCtl; if (txflow) mode |= Tx_FlowCtl; if (rxflow) mode |= Rx_FlowCtl; if (txany) mode |= IXM_IXANY; moxafunc(port->tableAddr, FC_SetFlowCtl, mode); } static int MoxaPortLineStatus(struct moxa_port *port) { void __iomem *ofsAddr; int val; ofsAddr = port->tableAddr; if (MOXA_IS_320(port->board)) val = moxafuncret(ofsAddr, FC_LineStatus, 0); else val = readw(ofsAddr + FlagStat) >> 4; val &= 0x0B; if (val & 8) val |= 4; moxa_new_dcdstate(port, val & 8); val &= 7; return val; } static int MoxaPortWriteData(struct tty_struct *tty, const unsigned char *buffer, int len) { struct moxa_port *port = tty->driver_data; void __iomem *baseAddr, *ofsAddr, *ofs; unsigned int c, total; u16 head, tail, tx_mask, spage, epage; u16 pageno, pageofs, bufhead; ofsAddr = port->tableAddr; baseAddr = port->board->basemem; tx_mask = readw(ofsAddr + TX_mask); spage = readw(ofsAddr + Page_txb); epage = readw(ofsAddr + EndPage_txb); tail = readw(ofsAddr + TXwptr); head = readw(ofsAddr + TXrptr); c = (head > tail) ? 
(head - tail - 1) : (head - tail + tx_mask); if (c > len) c = len; moxaLog.txcnt[port->port.tty->index] += c; total = c; if (spage == epage) { bufhead = readw(ofsAddr + Ofs_txb); writew(spage, baseAddr + Control_reg); while (c > 0) { if (head > tail) len = head - tail - 1; else len = tx_mask + 1 - tail; len = (c > len) ? len : c; ofs = baseAddr + DynPage_addr + bufhead + tail; memcpy_toio(ofs, buffer, len); buffer += len; tail = (tail + len) & tx_mask; c -= len; } } else { pageno = spage + (tail >> 13); pageofs = tail & Page_mask; while (c > 0) { len = Page_size - pageofs; if (len > c) len = c; writeb(pageno, baseAddr + Control_reg); ofs = baseAddr + DynPage_addr + pageofs; memcpy_toio(ofs, buffer, len); buffer += len; if (++pageno == epage) pageno = spage; pageofs = 0; c -= len; } tail = (tail + total) & tx_mask; } writew(tail, ofsAddr + TXwptr); writeb(1, ofsAddr + CD180TXirq); /* start to send */ return total; } static int MoxaPortReadData(struct moxa_port *port) { struct tty_struct *tty = port->port.tty; unsigned char *dst; void __iomem *baseAddr, *ofsAddr, *ofs; unsigned int count, len, total; u16 tail, rx_mask, spage, epage; u16 pageno, pageofs, bufhead, head; ofsAddr = port->tableAddr; baseAddr = port->board->basemem; head = readw(ofsAddr + RXrptr); tail = readw(ofsAddr + RXwptr); rx_mask = readw(ofsAddr + RX_mask); spage = readw(ofsAddr + Page_rxb); epage = readw(ofsAddr + EndPage_rxb); count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1); if (count == 0) return 0; total = count; moxaLog.rxcnt[tty->index] += total; if (spage == epage) { bufhead = readw(ofsAddr + Ofs_rxb); writew(spage, baseAddr + Control_reg); while (count > 0) { ofs = baseAddr + DynPage_addr + bufhead + head; len = (tail >= head) ? 
(tail - head) : (rx_mask + 1 - head);
			/* contiguous RX buffer: copy up to the wrap point */
			len = tty_prepare_flip_string(&port->port, &dst,
					min(len, count));
			memcpy_fromio(dst, ofs, len);
			head = (head + len) & rx_mask;
			count -= len;
		}
	} else {
		/*
		 * Paged RX buffer: data is spread over 8 KB pages selected
		 * via Control_reg; walk page by page, wrapping from epage
		 * back to spage.
		 */
		pageno = spage + (head >> 13);
		pageofs = head & Page_mask;
		while (count > 0) {
			writew(pageno, baseAddr + Control_reg);
			ofs = baseAddr + DynPage_addr + pageofs;
			len = tty_prepare_flip_string(&port->port, &dst,
					min(Page_size - pageofs, count));
			memcpy_fromio(dst, ofs, len);

			count -= len;
			pageofs = (pageofs + len) & Page_mask;
			if (pageofs == 0 && ++pageno == epage)
				pageno = spage;
		}
		head = (head + total) & rx_mask;
	}
	/* Publish the new read pointer only after all data is copied out. */
	writew(head, ofsAddr + RXrptr);

	/* If the channel sent XOFF, ask moxa_poll() to run the low-water
	 * check for this port. */
	if (readb(ofsAddr + FlagStat) & Xoff_state) {
		moxaLowWaterChk = 1;
		port->lowChkFlag = 1;
	}
	return total;
}

/* Bytes currently queued in the board's transmit ring. */
static int MoxaPortTxQueue(struct moxa_port *port)
{
	void __iomem *ofsAddr = port->tableAddr;
	u16 rptr, wptr, mask;

	rptr = readw(ofsAddr + TXrptr);
	wptr = readw(ofsAddr + TXwptr);
	mask = readw(ofsAddr + TX_mask);
	return (wptr - rptr) & mask;
}

/* Free space left in the board's transmit ring. */
static int MoxaPortTxFree(struct moxa_port *port)
{
	void __iomem *ofsAddr = port->tableAddr;
	u16 rptr, wptr, mask;

	rptr = readw(ofsAddr + TXrptr);
	wptr = readw(ofsAddr + TXwptr);
	mask = readw(ofsAddr + TX_mask);
	return mask - ((wptr - rptr) & mask);
}

/* Bytes currently queued in the board's receive ring. */
static int MoxaPortRxQueue(struct moxa_port *port)
{
	void __iomem *ofsAddr = port->tableAddr;
	u16 rptr, wptr, mask;

	rptr = readw(ofsAddr + RXrptr);
	wptr = readw(ofsAddr + RXwptr);
	mask = readw(ofsAddr + RX_mask);
	return (wptr - rptr) & mask;
}

/* Pause transmission by forcing the XOFF state on the channel. */
static void MoxaPortTxDisable(struct moxa_port *port)
{
	moxafunc(port->tableAddr, FC_SetXoffState, Magic_code);
}

/* Resume transmission by forcing the XON state on the channel. */
static void MoxaPortTxEnable(struct moxa_port *port)
{
	moxafunc(port->tableAddr, FC_SetXonState, Magic_code);
}

/* Fill a serial_struct snapshot for TIOCGSERIAL (returns on next line). */
static int moxa_get_serial_info(struct moxa_port *info,
		struct serial_struct __user *retinfo)
{
	struct serial_struct tmp = {
		.type = info->type,
		.line = info->port.tty->index,
		.flags = info->port.flags,
		.baud_base = 921600,
		.close_delay = info->port.close_delay
	};
	return
copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; } static int moxa_set_serial_info(struct moxa_port *info, struct serial_struct __user *new_info) { struct serial_struct new_serial; if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) return -EFAULT; if (new_serial.irq != 0 || new_serial.port != 0 || new_serial.custom_divisor != 0 || new_serial.baud_base != 921600) return -EPERM; if (!capable(CAP_SYS_ADMIN)) { if (((new_serial.flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) return -EPERM; } else info->port.close_delay = new_serial.close_delay * HZ / 100; new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS); new_serial.flags |= (info->port.flags & ASYNC_FLAGS); MoxaSetFifo(info, new_serial.type == PORT_16550A); info->type = new_serial.type; return 0; } /***************************************************************************** * Static local functions: * *****************************************************************************/ static void MoxaSetFifo(struct moxa_port *port, int enable) { void __iomem *ofsAddr = port->tableAddr; if (!enable) { moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0); moxafunc(ofsAddr, FC_SetTxFIFOCnt, 1); } else { moxafunc(ofsAddr, FC_SetRxFIFOTrig, 3); moxafunc(ofsAddr, FC_SetTxFIFOCnt, 16); } }
gpl-2.0
Stane1983/kernel-amlogic-mx
customer/drivers/misc/inv_mpu/mpu-dev.c
58
33226
/* $License: Copyright (C) 2011 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ #include <linux/i2c.h> #include <linux/i2c-dev.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/signal.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/version.h> #include <linux/pm.h> #include <linux/mutex.h> #include <linux/suspend.h> #include <linux/poll.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <linux/io.h> #include "mpuirq.h" #include "slaveirq.h" #include "mlsl.h" #include "mldl_cfg.h" #include <linux/mpu.h> /* Platform data for the MPU */ struct mpu_private_data { struct miscdevice dev; struct i2c_client *client; /* mldl_cfg data */ struct mldl_cfg mldl_cfg; struct mpu_ram mpu_ram; struct mpu_gyro_cfg mpu_gyro_cfg; struct mpu_offsets mpu_offsets; struct mpu_chip_info mpu_chip_info; struct inv_mpu_cfg inv_mpu_cfg; struct inv_mpu_state inv_mpu_state; struct mutex mutex; wait_queue_head_t mpu_event_wait; struct completion completion; struct timer_list timeout; struct notifier_block nb; struct mpuirq_data mpu_pm_event; int response_timeout; /* In seconds */ unsigned long 
event; int pid; struct module *slave_modules[EXT_SLAVE_NUM_TYPES]; }; struct mpu_private_data *mpu_private_data; static void mpu_pm_timeout(u_long data) { struct mpu_private_data *mpu = (struct mpu_private_data *)data; struct i2c_client *client = mpu->client; dev_dbg(&client->adapter->dev, "%s\n", __func__); complete(&mpu->completion); } static int mpu_pm_notifier_callback(struct notifier_block *nb, unsigned long event, void *unused) { struct mpu_private_data *mpu = container_of(nb, struct mpu_private_data, nb); struct i2c_client *client = mpu->client; struct timeval event_time; dev_dbg(&client->adapter->dev, "%s: %ld\n", __func__, event); /* Prevent the file handle from being closed before we initialize the completion event */ mutex_lock(&mpu->mutex); if (!(mpu->pid) || (event != PM_SUSPEND_PREPARE && event != PM_POST_SUSPEND)) { mutex_unlock(&mpu->mutex); return NOTIFY_OK; } if (event == PM_SUSPEND_PREPARE) mpu->event = MPU_PM_EVENT_SUSPEND_PREPARE; if (event == PM_POST_SUSPEND) mpu->event = MPU_PM_EVENT_POST_SUSPEND; do_gettimeofday(&event_time); mpu->mpu_pm_event.interruptcount++; mpu->mpu_pm_event.irqtime = (((long long)event_time.tv_sec) << 32) + event_time.tv_usec; mpu->mpu_pm_event.data_type = MPUIRQ_DATA_TYPE_PM_EVENT; mpu->mpu_pm_event.data = mpu->event; if (mpu->response_timeout > 0) { mpu->timeout.expires = jiffies + mpu->response_timeout * HZ; add_timer(&mpu->timeout); } INIT_COMPLETION(mpu->completion); mutex_unlock(&mpu->mutex); wake_up_interruptible(&mpu->mpu_event_wait); wait_for_completion(&mpu->completion); del_timer_sync(&mpu->timeout); dev_dbg(&client->adapter->dev, "%s: %ld DONE\n", __func__, event); return NOTIFY_OK; } static int mpu_dev_open(struct inode *inode, struct file *file) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); struct i2c_client *client = mpu->client; int result; int ii; dev_dbg(&client->adapter->dev, "%s\n", __func__); dev_dbg(&client->adapter->dev, "current->pid %d\n", 
current->pid); result = mutex_lock_interruptible(&mpu->mutex); if (mpu->pid) { mutex_unlock(&mpu->mutex); return -EBUSY; } mpu->pid = current->pid; /* Reset the sensors to the default */ if (result) { dev_err(&client->adapter->dev, "%s: mutex_lock_interruptible returned %d\n", __func__, result); return result; } for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) __module_get(mpu->slave_modules[ii]); mutex_unlock(&mpu->mutex); return 0; } /* close function - called when the "file" /dev/mpu is closed in userspace */ static int mpu_release(struct inode *inode, struct file *file) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; int result = 0; int ii; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); mldl_cfg->inv_mpu_cfg->requested_sensors = 0; result = inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_ALL_SENSORS); mpu->pid = 0; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) module_put(mpu->slave_modules[ii]); mutex_unlock(&mpu->mutex); complete(&mpu->completion); dev_dbg(&client->adapter->dev, "mpu_release\n"); return result; } /* read function called when from /dev/mpu is read. 
Read from the FIFO */ static ssize_t mpu_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); struct i2c_client *client = mpu->client; size_t len = sizeof(mpu->mpu_pm_event) + sizeof(unsigned long); int err; if (!mpu->event && (!(file->f_flags & O_NONBLOCK))) wait_event_interruptible(mpu->mpu_event_wait, mpu->event); if (!mpu->event || !buf || count < sizeof(mpu->mpu_pm_event)) return 0; err = copy_to_user(buf, &mpu->mpu_pm_event, sizeof(mpu->mpu_pm_event)); if (err) { dev_err(&client->adapter->dev, "Copy to user returned %d\n", err); return -EFAULT; } mpu->event = 0; return len; } static unsigned int mpu_poll(struct file *file, struct poll_table_struct *poll) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); int mask = 0; poll_wait(file, &mpu->mpu_event_wait, poll); if (mpu->event) mask |= POLLIN | POLLRDNORM; return mask; } static int mpu_dev_ioctl_get_ext_slave_platform_data( struct i2c_client *client, struct ext_slave_platform_data __user *arg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct ext_slave_platform_data *pdata_slave; struct ext_slave_platform_data local_pdata_slave; if (copy_from_user(&local_pdata_slave, arg, sizeof(local_pdata_slave))) return -EFAULT; if (local_pdata_slave.type >= EXT_SLAVE_NUM_TYPES) return -EINVAL; pdata_slave = mpu->mldl_cfg.pdata_slave[local_pdata_slave.type]; /* All but private data and irq_data */ if (!pdata_slave) return -ENODEV; if (copy_to_user(arg, pdata_slave, sizeof(*pdata_slave))) return -EFAULT; return 0; } static int mpu_dev_ioctl_get_mpu_platform_data( struct i2c_client *client, struct mpu_platform_data __user *arg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mpu_platform_data *pdata = mpu->mldl_cfg.pdata; if (copy_to_user(arg, pdata, sizeof(*pdata))) return 
-EFAULT; return 0; } static int mpu_dev_ioctl_get_ext_slave_descr( struct i2c_client *client, struct ext_slave_descr __user *arg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct ext_slave_descr *slave; struct ext_slave_descr local_slave; if (copy_from_user(&local_slave, arg, sizeof(local_slave))) return -EFAULT; if (local_slave.type >= EXT_SLAVE_NUM_TYPES) return -EINVAL; slave = mpu->mldl_cfg.slave[local_slave.type]; /* All but private data and irq_data */ if (!slave) return -ENODEV; if (copy_to_user(arg, slave, sizeof(*slave))) return -EFAULT; return 0; } /** * slave_config() - Pass a requested slave configuration to the slave sensor * * @adapter the adaptor to use to communicate with the slave * @mldl_cfg the mldl configuration structuer * @slave pointer to the slave descriptor * @usr_config The configuration to pass to the slave sensor * * returns 0 or non-zero error code */ static int inv_mpu_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = gyro_config(gyro_adapter, mldl_cfg, &config); kfree(config.data); return retval; } static int inv_mpu_get_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; void *user_data; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; user_data = config.data; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, 
(void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = gyro_get_config(gyro_adapter, mldl_cfg, &config); if (!retval) retval = copy_to_user((unsigned char __user *)user_data, config.data, config.len); kfree(config.data); return retval; } static int slave_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, void *slave_adapter, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; if ((!slave) || (!slave->config)) return -ENODEV; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = inv_mpu_slave_config(mldl_cfg, gyro_adapter, slave_adapter, &config, slave, pdata); kfree(config.data); return retval; } static int slave_get_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, void *slave_adapter, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; void *user_data; if (!(slave) || !(slave->get_config)) return -ENODEV; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; user_data = config.data; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = inv_mpu_get_slave_config(mldl_cfg, gyro_adapter, slave_adapter, &config, slave, pdata); if (retval) { kfree(config.data); return retval; } retval = 
copy_to_user((unsigned char __user *)user_data, config.data, config.len); kfree(config.data); return retval; } static int inv_slave_read(struct mldl_cfg *mldl_cfg, void *gyro_adapter, void *slave_adapter, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, void __user *usr_data) { int retval; unsigned char *data; data = kzalloc(slave->read_len, GFP_KERNEL); if (!data) return -EFAULT; retval = inv_mpu_slave_read(mldl_cfg, gyro_adapter, slave_adapter, slave, pdata, data); if ((!retval) && (copy_to_user((unsigned char __user *)usr_data, data, slave->read_len))) retval = -EFAULT; kfree(data); return retval; } static int mpu_handle_mlsl(void *sl_handle, unsigned char addr, unsigned int cmd, struct mpu_read_write __user *usr_msg) { int retval = 0; struct mpu_read_write msg; unsigned char *user_data; retval = copy_from_user(&msg, usr_msg, sizeof(msg)); if (retval) return -EFAULT; user_data = msg.data; if (msg.length && msg.data) { unsigned char *data; data = kmalloc(msg.length, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)msg.data, msg.length); if (retval) { retval = -EFAULT; kfree(data); return retval; } msg.data = data; } else { return -EPERM; } switch (cmd) { case MPU_READ: retval = inv_serial_read(sl_handle, addr, msg.address, msg.length, msg.data); break; case MPU_WRITE: retval = inv_serial_write(sl_handle, addr, msg.length, msg.data); break; case MPU_READ_MEM: retval = inv_serial_read_mem(sl_handle, addr, msg.address, msg.length, msg.data); break; case MPU_WRITE_MEM: retval = inv_serial_write_mem(sl_handle, addr, msg.address, msg.length, msg.data); break; case MPU_READ_FIFO: retval = inv_serial_read_fifo(sl_handle, addr, msg.length, msg.data); break; case MPU_WRITE_FIFO: retval = inv_serial_write_fifo(sl_handle, addr, msg.length, msg.data); break; }; if (retval) { dev_err(&((struct i2c_adapter *)sl_handle)->dev, "%s: i2c %d error %d\n", __func__, cmd, retval); kfree(msg.data); return retval; } retval = 
copy_to_user((unsigned char __user *)user_data, msg.data, msg.length); kfree(msg.data); return retval; } /* ioctl - I/O control */ static long mpu_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; int retval = 0; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_descr **slave = mldl_cfg->slave; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; retval = mutex_lock_interruptible(&mpu->mutex); if (retval) { dev_err(&client->adapter->dev, "%s: mutex_lock_interruptible returned %d\n", __func__, retval); return retval; } switch (cmd) { case MPU_GET_EXT_SLAVE_PLATFORM_DATA: retval = mpu_dev_ioctl_get_ext_slave_platform_data( client, (struct ext_slave_platform_data __user *)arg); break; case MPU_GET_MPU_PLATFORM_DATA: retval = mpu_dev_ioctl_get_mpu_platform_data( client, (struct mpu_platform_data __user *)arg); break; case MPU_GET_EXT_SLAVE_DESCR: retval = mpu_dev_ioctl_get_ext_slave_descr( client, (struct ext_slave_descr __user *)arg); break; case MPU_READ: case MPU_WRITE: case MPU_READ_MEM: case MPU_WRITE_MEM: case MPU_READ_FIFO: case MPU_WRITE_FIFO: retval = mpu_handle_mlsl( slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], mldl_cfg->mpu_chip_info->addr, cmd, (struct mpu_read_write __user *)arg); break; case MPU_CONFIG_GYRO: retval = inv_mpu_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], (struct ext_slave_config __user *)arg); break; case MPU_CONFIG_ACCEL: retval = slave_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave[EXT_SLAVE_TYPE_ACCEL], 
pdata_slave[EXT_SLAVE_TYPE_ACCEL], (struct ext_slave_config __user *)arg); break; case MPU_CONFIG_COMPASS: retval = slave_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave[EXT_SLAVE_TYPE_COMPASS], pdata_slave[EXT_SLAVE_TYPE_COMPASS], (struct ext_slave_config __user *)arg); break; case MPU_CONFIG_PRESSURE: retval = slave_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], slave[EXT_SLAVE_TYPE_PRESSURE], pdata_slave[EXT_SLAVE_TYPE_PRESSURE], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_GYRO: retval = inv_mpu_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_ACCEL: retval = slave_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave[EXT_SLAVE_TYPE_ACCEL], pdata_slave[EXT_SLAVE_TYPE_ACCEL], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_COMPASS: retval = slave_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave[EXT_SLAVE_TYPE_COMPASS], pdata_slave[EXT_SLAVE_TYPE_COMPASS], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_PRESSURE: retval = slave_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], slave[EXT_SLAVE_TYPE_PRESSURE], pdata_slave[EXT_SLAVE_TYPE_PRESSURE], (struct ext_slave_config __user *)arg); break; case MPU_SUSPEND: retval = inv_mpu_suspend( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], arg); break; case MPU_RESUME: retval = inv_mpu_resume( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], arg); break; case MPU_PM_EVENT_HANDLED: 
dev_dbg(&client->adapter->dev, "%s: %d\n", __func__, cmd); complete(&mpu->completion); break; case MPU_READ_ACCEL: retval = inv_slave_read( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave[EXT_SLAVE_TYPE_ACCEL], pdata_slave[EXT_SLAVE_TYPE_ACCEL], (unsigned char __user *)arg); break; case MPU_READ_COMPASS: retval = inv_slave_read( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave[EXT_SLAVE_TYPE_COMPASS], pdata_slave[EXT_SLAVE_TYPE_COMPASS], (unsigned char __user *)arg); break; case MPU_READ_PRESSURE: retval = inv_slave_read( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], slave[EXT_SLAVE_TYPE_PRESSURE], pdata_slave[EXT_SLAVE_TYPE_PRESSURE], (unsigned char __user *)arg); break; case MPU_GET_REQUESTED_SENSORS: if (copy_to_user( (__u32 __user *)arg, &mldl_cfg->inv_mpu_cfg->requested_sensors, sizeof(mldl_cfg->inv_mpu_cfg->requested_sensors))) retval = -EFAULT; break; case MPU_SET_REQUESTED_SENSORS: mldl_cfg->inv_mpu_cfg->requested_sensors = arg; break; case MPU_GET_IGNORE_SYSTEM_SUSPEND: if (copy_to_user( (unsigned char __user *)arg, &mldl_cfg->inv_mpu_cfg->ignore_system_suspend, sizeof(mldl_cfg->inv_mpu_cfg->ignore_system_suspend))) retval = -EFAULT; break; case MPU_SET_IGNORE_SYSTEM_SUSPEND: mldl_cfg->inv_mpu_cfg->ignore_system_suspend = arg; break; case MPU_GET_MLDL_STATUS: if (copy_to_user( (unsigned char __user *)arg, &mldl_cfg->inv_mpu_state->status, sizeof(mldl_cfg->inv_mpu_state->status))) retval = -EFAULT; break; case MPU_GET_I2C_SLAVES_ENABLED: if (copy_to_user( (unsigned char __user *)arg, &mldl_cfg->inv_mpu_state->i2c_slaves_enabled, sizeof(mldl_cfg->inv_mpu_state->i2c_slaves_enabled))) retval = -EFAULT; break; default: dev_err(&client->adapter->dev, "%s: Unknown cmd %x, arg %lu\n", __func__, cmd, arg); retval = -EINVAL; }; mutex_unlock(&mpu->mutex); dev_dbg(&client->adapter->dev, "%s: %08x, %08lx, %d\n", __func__, cmd, arg, 
retval); if (retval > 0) retval = -retval; return retval; } void mpu_shutdown(struct i2c_client *client) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); (void)inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_ALL_SENSORS); mutex_unlock(&mpu->mutex); dev_dbg(&client->adapter->dev, "%s\n", __func__); } int mpu_dev_suspend(struct i2c_client *client, pm_message_t mesg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); if (!mldl_cfg->inv_mpu_cfg->ignore_system_suspend) { dev_dbg(&client->adapter->dev, "%s: suspending on event %d\n", __func__, mesg.event); (void)inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_ALL_SENSORS); } else { dev_dbg(&client->adapter->dev, "%s: Already suspended %d\n", __func__, mesg.event); } mutex_unlock(&mpu->mutex); return 0; } int mpu_dev_resume(struct 
i2c_client *client) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); if (mpu->pid && !mldl_cfg->inv_mpu_cfg->ignore_system_suspend) { (void)inv_mpu_resume(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], mldl_cfg->inv_mpu_cfg->requested_sensors); dev_dbg(&client->adapter->dev, "%s for pid %d\n", __func__, mpu->pid); } mutex_unlock(&mpu->mutex); return 0; } /* define which file operations are supported */ static const struct file_operations mpu_fops = { .owner = THIS_MODULE, .read = mpu_read, .poll = mpu_poll, .unlocked_ioctl = mpu_dev_ioctl, .open = mpu_dev_open, .release = mpu_release, }; int inv_mpu_register_slave(struct module *slave_module, struct i2c_client *slave_client, struct ext_slave_platform_data *slave_pdata, struct ext_slave_descr *(*get_slave_descr)(void)) { struct mpu_private_data *mpu = mpu_private_data; struct mldl_cfg *mldl_cfg; struct ext_slave_descr *slave_descr; struct ext_slave_platform_data **pdata_slave; char *irq_name = NULL; int result = 0; if (!slave_client || !slave_pdata || !get_slave_descr) return -EINVAL; if (!mpu) { dev_err(&slave_client->adapter->dev, "%s: Null mpu_private_data\n", __func__); return -EINVAL; } mldl_cfg = &mpu->mldl_cfg; pdata_slave = mldl_cfg->pdata_slave; slave_descr = get_slave_descr(); if (!slave_descr) { dev_err(&slave_client->adapter->dev, "%s: Null ext_slave_descr\n", __func__); return -EINVAL; } mutex_lock(&mpu->mutex); if (mpu->pid) 
{ mutex_unlock(&mpu->mutex); return -EBUSY; } if (pdata_slave[slave_descr->type]) { result = -EBUSY; goto out_unlock_mutex; } slave_pdata->address = slave_client->addr; slave_pdata->irq = slave_client->irq; slave_pdata->adapt_num = i2c_adapter_id(slave_client->adapter); dev_info(&slave_client->adapter->dev, "%s: +%s Type %d: Addr: %2x IRQ: %2d, Adapt: %2d\n", __func__, slave_descr->name, slave_descr->type, slave_pdata->address, slave_pdata->irq, slave_pdata->adapt_num); switch (slave_descr->type) { case EXT_SLAVE_TYPE_ACCEL: irq_name = "accelirq"; break; case EXT_SLAVE_TYPE_COMPASS: irq_name = "compassirq"; break; case EXT_SLAVE_TYPE_PRESSURE: irq_name = "pressureirq"; break; default: irq_name = "none"; }; if (slave_descr->init) { result = slave_descr->init(slave_client->adapter, slave_descr, slave_pdata); if (result) { dev_err(&slave_client->adapter->dev, "%s init failed %d\n", slave_descr->name, result); goto out_unlock_mutex; } } pdata_slave[slave_descr->type] = slave_pdata; mpu->slave_modules[slave_descr->type] = slave_module; mldl_cfg->slave[slave_descr->type] = slave_descr; goto out_unlock_mutex; out_unlock_mutex: mutex_unlock(&mpu->mutex); if (!result && irq_name && (slave_pdata->irq > 0)) { int warn_result; dev_info(&slave_client->adapter->dev, "Installing %s irq using %d\n", irq_name, slave_pdata->irq); warn_result = slaveirq_init(slave_client->adapter, slave_pdata, irq_name); if (result) dev_WARN(&slave_client->adapter->dev, "%s irq assigned error: %d\n", slave_descr->name, warn_result); } else { dev_WARN(&slave_client->adapter->dev, "%s irq not assigned: %d %d %d\n", slave_descr->name, result, (int)irq_name, slave_pdata->irq); } return result; } EXPORT_SYMBOL(inv_mpu_register_slave); void inv_mpu_unregister_slave(struct i2c_client *slave_client, struct ext_slave_platform_data *slave_pdata, struct ext_slave_descr *(*get_slave_descr)(void)) { struct mpu_private_data *mpu = mpu_private_data; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct ext_slave_descr 
*slave_descr; int result; dev_info(&slave_client->adapter->dev, "%s\n", __func__); if (!slave_client || !slave_pdata || !get_slave_descr) return; if (slave_pdata->irq) slaveirq_exit(slave_pdata); slave_descr = get_slave_descr(); if (!slave_descr) return; mutex_lock(&mpu->mutex); if (slave_descr->exit) { result = slave_descr->exit(slave_client->adapter, slave_descr, slave_pdata); if (result) dev_err(&slave_client->adapter->dev, "Accel exit failed %d\n", result); } mldl_cfg->slave[slave_descr->type] = NULL; mldl_cfg->pdata_slave[slave_descr->type] = NULL; mpu->slave_modules[slave_descr->type] = NULL; mutex_unlock(&mpu->mutex); } EXPORT_SYMBOL(inv_mpu_unregister_slave); static unsigned short normal_i2c[] = { I2C_CLIENT_END }; static const struct i2c_device_id mpu_id[] = { {"mpu3050", 0}, {"mpu6050", 0}, {"mpu6050_no_accel", 0}, {} }; MODULE_DEVICE_TABLE(i2c, mpu_id); int mpu_probe(struct i2c_client *client, const struct i2c_device_id *devid) { struct mpu_platform_data *pdata; struct mpu_private_data *mpu; struct mldl_cfg *mldl_cfg; int res = 0; int ii = 0; dev_info(&client->adapter->dev, "%s: %d\n", __func__, ii++); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { res = -ENODEV; goto out_check_functionality_failed; } mpu = kzalloc(sizeof(struct mpu_private_data), GFP_KERNEL); if (!mpu) { res = -ENOMEM; goto out_alloc_data_failed; } mldl_cfg = &mpu->mldl_cfg; mldl_cfg->mpu_ram = &mpu->mpu_ram; mldl_cfg->mpu_gyro_cfg = &mpu->mpu_gyro_cfg; mldl_cfg->mpu_offsets = &mpu->mpu_offsets; mldl_cfg->mpu_chip_info = &mpu->mpu_chip_info; mldl_cfg->inv_mpu_cfg = &mpu->inv_mpu_cfg; mldl_cfg->inv_mpu_state = &mpu->inv_mpu_state; mldl_cfg->mpu_ram->length = MPU_MEM_NUM_RAM_BANKS * MPU_MEM_BANK_SIZE; mldl_cfg->mpu_ram->ram = kzalloc(mldl_cfg->mpu_ram->length, GFP_KERNEL); if (!mldl_cfg->mpu_ram->ram) { res = -ENOMEM; goto out_alloc_ram_failed; } mpu_private_data = mpu; i2c_set_clientdata(client, mpu); mpu->client = client; init_waitqueue_head(&mpu->mpu_event_wait); 
mutex_init(&mpu->mutex); init_completion(&mpu->completion); mpu->response_timeout = 60; /* Seconds */ mpu->timeout.function = mpu_pm_timeout; mpu->timeout.data = (u_long) mpu; init_timer(&mpu->timeout); mpu->nb.notifier_call = mpu_pm_notifier_callback; mpu->nb.priority = 0; res = register_pm_notifier(&mpu->nb); if (res) { dev_err(&client->adapter->dev, "Unable to register pm_notifier %d\n", res); goto out_register_pm_notifier_failed; } pdata = (struct mpu_platform_data *)client->dev.platform_data; if (!pdata) { dev_WARN(&client->adapter->dev, "Missing platform data for mpu\n"); } mldl_cfg->pdata = pdata; mldl_cfg->mpu_chip_info->addr = client->addr; res = inv_mpu_open(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL); if (res) { dev_err(&client->adapter->dev, "Unable to open %s %d\n", MPU_NAME, res); res = -ENODEV; goto out_whoami_failed; } mpu->dev.minor = MISC_DYNAMIC_MINOR; mpu->dev.name = "mpu"; mpu->dev.fops = &mpu_fops; res = misc_register(&mpu->dev); if (res < 0) { dev_err(&client->adapter->dev, "ERROR: misc_register returned %d\n", res); goto out_misc_register_failed; } if (client->irq) { dev_info(&client->adapter->dev, "Installing irq using %d\n", client->irq); res = mpuirq_init(client, mldl_cfg); if (res) goto out_mpuirq_failed; } else { dev_WARN(&client->adapter->dev, "Missing %s IRQ\n", MPU_NAME); } return res; out_mpuirq_failed: misc_deregister(&mpu->dev); out_misc_register_failed: inv_mpu_close(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL); out_whoami_failed: unregister_pm_notifier(&mpu->nb); out_register_pm_notifier_failed: kfree(mldl_cfg->mpu_ram->ram); mpu_private_data = NULL; out_alloc_ram_failed: kfree(mpu); out_alloc_data_failed: out_check_functionality_failed: dev_err(&client->adapter->dev, "%s failed %d\n", __func__, res); return res; } static int mpu_remove(struct i2c_client *client) { struct mpu_private_data *mpu = i2c_get_clientdata(client); struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct mldl_cfg *mldl_cfg = 
&mpu->mldl_cfg; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; dev_dbg(&client->adapter->dev, "%s\n", __func__); inv_mpu_close(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE]); if (client->irq) mpuirq_exit(); misc_deregister(&mpu->dev); unregister_pm_notifier(&mpu->nb); kfree(mpu->mldl_cfg.mpu_ram->ram); kfree(mpu); return 0; } static struct i2c_driver mpu_driver = { .class = I2C_CLASS_HWMON, .probe = mpu_probe, .remove = mpu_remove, .id_table = mpu_id, .driver = { .owner = THIS_MODULE, .name = MPU_NAME, }, .address_list = normal_i2c, .shutdown = mpu_shutdown, /* optional */ .suspend = mpu_dev_suspend, /* optional */ .resume = mpu_dev_resume, /* optional */ }; static int __init mpu_init(void) { int res = i2c_add_driver(&mpu_driver); pr_info("%s: Probe name %s\n", __func__, MPU_NAME); if (res) pr_err("%s failed\n", __func__); return res; } static void __exit mpu_exit(void) { pr_info("%s\n", __func__); i2c_del_driver(&mpu_driver); } module_init(mpu_init); module_exit(mpu_exit); MODULE_AUTHOR("Invensense Corporation"); MODULE_DESCRIPTION("User space character device interface for MPU"); MODULE_LICENSE("GPL"); MODULE_ALIAS(MPU_NAME);
gpl-2.0
Dazzworld/android_kernel_zte_hn8916
arch/arm/mm/dma-mapping.c
58
54127
/* * linux/arch/arm/mm/dma-mapping.c * * Copyright (C) 2000-2004 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * DMA uncached mapping support. */ #include <linux/bootmem.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/init.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/dma-contiguous.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/iommu.h> #include <linux/io.h> #include <linux/vmalloc.h> #include <linux/sizes.h> #include <asm/memory.h> #include <asm/highmem.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/mach/arch.h> #include <asm/dma-iommu.h> #include <asm/mach/map.h> #include <asm/system_info.h> #include <asm/dma-contiguous.h> #include "mm.h" /* * The DMA API is built upon the notion of "buffer ownership". A buffer * is either exclusively owned by the CPU (and therefore may be accessed * by it) or exclusively owned by the DMA device. These helper functions * represent the transitions between these two ownership states. * * Note, however, that on later ARMs, this notion does not work due to * speculative prefetches. We model our approach on the assumption that * the CPU does do speculative prefetches, which means we clean caches * before transfers and delay cache invalidation until transfer completion. 
* */ static void __dma_page_cpu_to_dev(struct page *, unsigned long, size_t, enum dma_data_direction); static void __dma_page_dev_to_cpu(struct page *, unsigned long, size_t, enum dma_data_direction); /** * arm_dma_map_page - map a portion of a page for streaming DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @page: page that buffer resides in * @offset: offset into page for start of buffer * @size: size of buffer to map * @dir: DMA transfer direction * * Ensure that any data held in the cache is appropriately discarded * or written back. * * The device owns this memory once this call has completed. The CPU * can regain ownership by calling dma_unmap_page(). */ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_cpu_to_dev(page, offset, size, dir); return pfn_to_dma(dev, page_to_pfn(page)) + offset; } static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { return pfn_to_dma(dev, page_to_pfn(page)) + offset; } /** * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @handle: DMA address of buffer * @size: size of buffer (same as passed to dma_map_page) * @dir: DMA transfer direction (same as passed to dma_map_page) * * Unmap a page streaming mode DMA translation. The handle and size * must match what was provided in the previous dma_map_page() call. * All other usages are undefined. * * After this call, reads by the CPU to the buffer are guaranteed to see * whatever the device wrote there. 
*/ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), handle & ~PAGE_MASK, size, dir); } static void arm_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { unsigned int offset = handle & (PAGE_SIZE - 1); struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); __dma_page_dev_to_cpu(page, offset, size, dir); } static void arm_dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { unsigned int offset = handle & (PAGE_SIZE - 1); struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); __dma_page_cpu_to_dev(page, offset, size, dir); } struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, .mmap = arm_dma_mmap, .get_sgtable = arm_dma_get_sgtable, .map_page = arm_dma_map_page, .unmap_page = arm_dma_unmap_page, .map_sg = arm_dma_map_sg, .unmap_sg = arm_dma_unmap_sg, .sync_single_for_cpu = arm_dma_sync_single_for_cpu, .sync_single_for_device = arm_dma_sync_single_for_device, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, .set_dma_mask = arm_dma_set_mask, }; EXPORT_SYMBOL(arm_dma_ops); static void *arm_coherent_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs); struct dma_map_ops arm_coherent_dma_ops = { .alloc = arm_coherent_dma_alloc, .free = arm_coherent_dma_free, .mmap = arm_dma_mmap, .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, .set_dma_mask = arm_dma_set_mask, }; EXPORT_SYMBOL(arm_coherent_dma_ops); static int __dma_supported(struct device *dev, u64 
mask, bool warn) { unsigned long max_dma_pfn; /* * If the mask allows for more memory than we can address, * and we actually have that much memory, then we must * indicate that DMA to this device is not supported. */ if (sizeof(mask) != sizeof(dma_addr_t) && mask > (dma_addr_t)~0 && dma_to_pfn(dev, ~0) < max_pfn) { if (warn) { dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", mask); dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); } return 0; } max_dma_pfn = min(max_pfn, arm_dma_pfn_limit); /* * Translate the device's DMA mask to a PFN limit. This * PFN number includes the page which we can DMA to. */ if (dma_to_pfn(dev, mask) < max_dma_pfn) { if (warn) dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", mask, dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, max_dma_pfn + 1); return 0; } return 1; } static u64 get_coherent_dma_mask(struct device *dev) { u64 mask = (u64)arm_dma_limit; if (dev) { mask = dev->coherent_dma_mask; /* * Sanity check the DMA mask - it must be non-zero, and * must be able to be satisfied by a DMA allocation. */ if (mask == 0) { dev_warn(dev, "coherent DMA mask is unset\n"); return 0; } if (!__dma_supported(dev, mask, true)) return 0; } return mask; } static void __dma_clear_buffer(struct page *page, size_t size, struct dma_attrs *attrs) { /* * Ensure that the allocated pages are zeroed, and that any data * lurking in the kernel direct-mapped region is invalidated. 
*/ if (PageHighMem(page)) { phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); phys_addr_t end = base + size; while (size > 0) { void *ptr = kmap_atomic(page); if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)) memset(ptr, 0, PAGE_SIZE); dmac_flush_range(ptr, ptr + PAGE_SIZE); kunmap_atomic(ptr); page++; size -= PAGE_SIZE; } outer_flush_range(base, end); } else { void *ptr = page_address(page); if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)) memset(ptr, 0, size); dmac_flush_range(ptr, ptr + size); outer_flush_range(__pa(ptr), __pa(ptr) + size); } } /* * Allocate a DMA buffer for 'dev' of size 'size' using the * specified gfp mask. Note that 'size' must be page aligned. */ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) { unsigned long order = get_order(size); struct page *page, *p, *e; page = alloc_pages(gfp, order); if (!page) return NULL; /* * Now split the huge page and free the excess pages */ split_page(page, order); for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) __free_page(p); __dma_clear_buffer(page, size, NULL); return page; } /* * Free a DMA buffer. 'size' must be page aligned. */ static void __dma_free_buffer(struct page *page, size_t size) { struct page *e = page + (size >> PAGE_SHIFT); while (page < e) { __free_page(page); page++; } } #ifdef CONFIG_MMU #ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, const void *caller, struct dma_attrs *attrs); static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, pgprot_t prot, struct page **ret_page, const void *caller); static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { struct vm_struct *area; unsigned long addr; /* * DMA allocation can be mapped to user space, so lets * set VM_USERMAP flags too. 
*/ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; area->phys_addr = __pfn_to_phys(page_to_pfn(page)); if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { vunmap((void *)addr); return NULL; } return (void *)addr; } static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn) { unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; struct vm_struct *area = find_vm_area(cpu_addr); if (!area || (area->flags & flags) != flags) { WARN(!no_warn, "trying to free invalid coherent area: %p\n", cpu_addr); return; } unmap_kernel_range((unsigned long)cpu_addr, size); vunmap(cpu_addr); } #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K struct dma_pool { size_t size; spinlock_t lock; unsigned long *bitmap; unsigned long nr_pages; void *vaddr; struct page **pages; }; static struct dma_pool atomic_pool = { .size = DEFAULT_DMA_COHERENT_POOL_SIZE, }; static int __init early_coherent_pool(char *p) { atomic_pool.size = memparse(p, &p); return 0; } early_param("coherent_pool", early_coherent_pool); void __init init_dma_coherent_pool_size(unsigned long size) { /* * Catch any attempt to set the pool size too late. */ BUG_ON(atomic_pool.vaddr); /* * Set architecture specific coherent pool size only if * it has not been changed by kernel command line parameter. */ if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) atomic_pool.size = size; } /* * Initialise the coherent pool for atomic allocations. 
*/ static int __init atomic_pool_init(void) { struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL); gfp_t gfp = GFP_KERNEL | GFP_DMA; unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; struct page **pages; void *ptr; int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!bitmap) goto no_bitmap; pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); if (!pages) goto no_pages; if (IS_ENABLED(CONFIG_CMA)) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, atomic_pool_init, NULL); else ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, atomic_pool_init); if (ptr) { int i; for (i = 0; i < nr_pages; i++) pages[i] = page + i; spin_lock_init(&pool->lock); pool->vaddr = ptr; pool->pages = pages; pool->bitmap = bitmap; pool->nr_pages = nr_pages; pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", (unsigned)pool->size / 1024); return 0; } kfree(pages); no_pages: kfree(bitmap); no_bitmap: pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", (unsigned)pool->size / 1024); return -ENOMEM; } /* * CMA is activated by core_initcall, so we must be called after it. 
 */
postcore_initcall(atomic_pool_init);

/* One early CMA reservation to be re-mapped once paging is up. */
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
static int dma_mmu_remap_num __initdata;

/* Record a CMA area for later remapping by dma_contiguous_remap(). */
void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

/*
 * Re-map every recorded CMA area as MT_MEMORY_DMA_READY, tearing down the
 * previous low-memory section mappings first.
 */
void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		/* Only lowmem is remapped here; clip to the lowmem limit. */
		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

/* apply_to_page_range() callback: rewrite a PTE with the given pgprot. */
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

/* apply_to_page_range() callback: remove the kernel mapping entirely. */
static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			   void *data)
{
	pte_clear(&init_mm, addr, pte);
	return 0;
}

/*
 * Change (or clear) the kernel linear-map attributes of a lowmem buffer.
 * NOTE(review): 'unsigned end' truncates 'start + size' on LP64 — should be
 * 'unsigned long'; harmless on 32-bit ARM where both are 32 bits. Confirm
 * before reusing on other architectures.
 */
static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
			bool no_kernel_map)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;
	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
		    void *data);

	if (no_kernel_map)
		func = __dma_clear_pte;
	else
		func = __dma_update_pte;

	apply_to_page_range(&init_mm, start, size, func, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}

/* Allocate pages and give them a fresh (non-cacheable) kernel mapping. */
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

/* Carve an allocation out of the preallocated atomic pool (non-blocking). */
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, 0,
					    count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

/* True iff [start, start+size) lies wholly inside the atomic pool. */
static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

/* Return 1 if the region belonged to the atomic pool and was released. */
static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

/* Returned instead of NULL when DMA_ATTR_NO_KERNEL_MAPPING is requested. */
#define NO_KERNEL_MAPPING_DUMMY	0x2222

/* Allocate from CMA, optionally remapping or dropping the kernel mapping. */
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller,
				     struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	unsigned long pfn;
	struct page *page;
	void *ptr;
	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
					attrs);

	pfn = dma_alloc_from_contiguous(dev, count, order);
	if (!pfn)
		return NULL;

	page = pfn_to_page(pfn);

	/*
	 * skip completely if we neither need to zero nor sync.
	 */
	if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
	      dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
		__dma_clear_buffer(page, size, attrs);

	if (PageHighMem(page)) {
		if (no_kernel_mapping) {
			/*
			 * Something non-NULL needs to be returned here. Give
			 * back a dummy address that is unmapped to catch
			 * clients trying to use the address incorrectly
			 */
			ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
		} else {
			ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
						caller);
			if (!ptr) {
				dma_release_from_contiguous(dev, pfn, count);
				return NULL;
			}
		}
	} else {
		/* Lowmem: retune the linear mapping in place. */
		__dma_remap(page, size, prot, no_kernel_mapping);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

/* Undo __alloc_from_contiguous(): restore mapping, return pages to CMA. */
static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size, true);
	else
		__dma_remap(page, size, PAGE_KERNEL, false);
	dma_release_from_contiguous(dev, page_to_pfn(page), size >> PAGE_SHIFT);
}

/* Translate dma_attrs into a pgprot for the CPU-side mapping. */
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		prot = pgprot_writecombine(prot);
	else if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
		prot = pgprot_stronglyordered(prot);
	/* if non-consistent just pass back what was given */
	else if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		prot = pgprot_dmacoherent(prot);
	return prot;
}

#define nommu()	0

#else	/* !CONFIG_MMU */

#define nommu()	1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, w)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size, w)			do { } while (0)

#endif	/* CONFIG_MMU */

/* Plain page allocation; CPU address is the linear-map address. */
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

/*
 * Common allocation path: pick simple/pool/remap/CMA backend based on
 * coherency, atomicity (gfp) and CONFIG_CMA.
 * NOTE(review): dev_warn format uses %#x for a size_t argument — presumably
 * fine on 32-bit ARM; verify if this is ever built elsewhere.
 */
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 const void *caller, struct dma_attrs *attrs)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, attrs);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	/* Per-device coherent memory (dma_declare_coherent_memory) first. */
	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0), attrs);
}

/* As arm_dma_alloc(), but for hardware-coherent devices (no remapping). */
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0), attrs);
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	/* Device-private coherent regions are handled by the core helper. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Only map if the requested window fits inside the buffer. */
	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	/* Mirror the backend selection made in __dma_alloc(). */
	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size, false);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

/* Free a buffer allocated with arm_dma_alloc(). */
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

/* Free a buffer allocated with arm_coherent_dma_alloc(). */
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

/* Describe a DMA-coherent buffer as a single-entry sg_table. */
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

/*
 * Apply a cache maintenance op across a (possibly multi-page, possibly
 * highmem) physically contiguous region starting at page+offset.
 */
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			/* Highmem must be handled one page at a time. */
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				/* Only maintain pages already kmapped. */
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

/* Hand ownership of a streaming buffer back to the CPU after DMA. */
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
*/ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i, j; for_each_sg(sg, s, nents, i) { #ifdef CONFIG_NEED_SG_DMA_LENGTH s->dma_length = s->length; #endif s->dma_address = ops->map_page(dev, sg_page(s), s->offset, s->length, dir, attrs); if (dma_mapping_error(dev, s->dma_address)) goto bad_mapping; } return nents; bad_mapping: for_each_sg(sg, s, i, j) ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); return 0; } /** * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to unmap (same as was passed to dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) * * Unmap a set of streaming mode DMA translations. Again, CPU access * rules concerning calls here are the same as for dma_unmap_single(). 
*/ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); } /** * arm_dma_sync_sg_for_cpu * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to map (returned from dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) */ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, dir); } /** * arm_dma_sync_sg_for_device * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to map (returned from dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) */ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) ops->sync_single_for_device(dev, sg_dma_address(s), s->length, dir); } /* * Return whether the given device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits * during bus mastering, then you would pass 0x00ffffff as the mask * to this function. 
*/ int dma_supported(struct device *dev, u64 mask) { return __dma_supported(dev, mask, false); } EXPORT_SYMBOL(dma_supported); int arm_dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; *dev->dma_mask = dma_mask; return 0; } #define PREALLOC_DMA_DEBUG_ENTRIES 4096 static int __init dma_debug_do_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_debug_do_init); #ifdef CONFIG_ARM_DMA_USE_IOMMU /* IOMMU */ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, size_t size) { unsigned int order = get_order(size); unsigned int align = 0; unsigned int count, start; unsigned long flags; if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) + (1 << mapping->order) - 1) >> mapping->order; if (order > mapping->order) align = (1 << (order - mapping->order)) - 1; spin_lock_irqsave(&mapping->lock, flags); start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, count, align); if (start > mapping->bits) { spin_unlock_irqrestore(&mapping->lock, flags); return DMA_ERROR_CODE; } bitmap_set(mapping->bitmap, start, count); spin_unlock_irqrestore(&mapping->lock, flags); return mapping->base + (start << (mapping->order + PAGE_SHIFT)); } static inline void __free_iova(struct dma_iommu_mapping *mapping, dma_addr_t addr, size_t size) { unsigned int start = (addr - mapping->base) >> (mapping->order + PAGE_SHIFT); unsigned int count = ((size >> PAGE_SHIFT) + (1 << mapping->order) - 1) >> mapping->order; unsigned long flags; spin_lock_irqsave(&mapping->lock, flags); bitmap_clear(mapping->bitmap, start, count); spin_unlock_irqrestore(&mapping->lock, flags); } static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp, struct dma_attrs *attrs) { struct page **pages; int count = size >> PAGE_SHIFT; int array_size = count * sizeof(struct page *); int i = 0; if (array_size 
<= PAGE_SIZE) pages = kzalloc(array_size, gfp); else pages = vzalloc(array_size); if (!pages) return NULL; if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { unsigned long order = get_order(size); struct page *page; unsigned long pfn; pfn = dma_alloc_from_contiguous(dev, count, order); if (!pfn) goto error; pfn = pfn_to_page(pfn); __dma_clear_buffer(page, size, NULL); for (i = 0; i < count; i++) pages[i] = page + i; return pages; } /* * IOMMU can map any pages, so himem can also be used here */ gfp |= __GFP_NOWARN | __GFP_HIGHMEM; while (count) { int j, order = __fls(count); pages[i] = alloc_pages(gfp, order); while (!pages[i] && order) pages[i] = alloc_pages(gfp, --order); if (!pages[i]) goto error; if (order) { split_page(pages[i], order); j = 1 << order; while (--j) pages[i + j] = pages[i] + j; } __dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL); i += 1 << order; count -= 1 << order; } return pages; error: while (i--) if (pages[i]) __free_pages(pages[i], 0); if (array_size <= PAGE_SIZE) kfree(pages); else vfree(pages); return NULL; } static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size, struct dma_attrs *attrs) { int count = size >> PAGE_SHIFT; int array_size = count * sizeof(struct page *); int i; if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { dma_release_from_contiguous(dev, page_to_pfn(pages[0]), count); } else { for (i = 0; i < count; i++) if (pages[i]) __free_pages(pages[i], 0); } if (array_size <= PAGE_SIZE) kfree(pages); else vfree(pages); return 0; } /* * Create a CPU mapping for a specified pages */ static void * __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; struct vm_struct *area; unsigned long p; area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, caller); if (!area) return NULL; area->pages = pages; area->nr_pages = nr_pages; p = (unsigned long)area->addr; for (i = 0; i < 
nr_pages; i++) { phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) goto err; p += PAGE_SIZE; } return area->addr; err: unmap_kernel_range((unsigned long)area->addr, size); vunmap(area->addr); return NULL; } /* * Create a mapping in device IO address space for specified pages */ static dma_addr_t __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; dma_addr_t dma_addr, iova; int i, ret = DMA_ERROR_CODE; dma_addr = __alloc_iova(mapping, size); if (dma_addr == DMA_ERROR_CODE) return dma_addr; iova = dma_addr; for (i = 0; i < count; ) { unsigned int next_pfn = page_to_pfn(pages[i]) + 1; phys_addr_t phys = page_to_phys(pages[i]); unsigned int len, j; for (j = i + 1; j < count; j++, next_pfn++) if (page_to_pfn(pages[j]) != next_pfn) break; len = (j - i) << PAGE_SHIFT; ret = iommu_map(mapping->domain, iova, phys, len, 0); if (ret < 0) goto fail; iova += len; i = j; } return dma_addr; fail: iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); __free_iova(mapping, dma_addr, size); return DMA_ERROR_CODE; } static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; /* * add optional in-page offset from iova to size and align * result to page size */ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); iova &= PAGE_MASK; iommu_unmap(mapping->domain, iova, size); __free_iova(mapping, iova, size); return 0; } static struct page **__atomic_get_pages(void *addr) { struct dma_pool *pool = &atomic_pool; struct page **pages = pool->pages; int offs = (addr - pool->vaddr) >> PAGE_SHIFT; return pages + offs; } static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) { struct vm_struct *area; if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) return __atomic_get_pages(cpu_addr); if 
(dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) return cpu_addr; area = find_vm_area(cpu_addr); if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) return area->pages; return NULL; } static void *__iommu_alloc_atomic(struct device *dev, size_t size, dma_addr_t *handle) { struct page *page; void *addr; addr = __alloc_from_pool(size, &page); if (!addr) return NULL; *handle = __iommu_create_mapping(dev, &page, size); if (*handle == DMA_ERROR_CODE) goto err_mapping; return addr; err_mapping: __free_from_pool(addr, size); return NULL; } static void __iommu_free_atomic(struct device *dev, void *cpu_addr, dma_addr_t handle, size_t size) { __iommu_remove_mapping(dev, handle, size); __free_from_pool(cpu_addr, size); } static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); struct page **pages; void *addr = NULL; *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); if (gfp & GFP_ATOMIC) return __iommu_alloc_atomic(dev, size, handle); pages = __iommu_alloc_buffer(dev, size, gfp, attrs); if (!pages) return NULL; *handle = __iommu_create_mapping(dev, pages, size); if (*handle == DMA_ERROR_CODE) goto err_buffer; if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) return pages; addr = __iommu_alloc_remap(pages, size, gfp, prot, __builtin_return_address(0)); if (!addr) goto err_mapping; return addr; err_mapping: __iommu_remove_mapping(dev, *handle, size); err_buffer: __iommu_free_buffer(dev, pages, size, attrs); return NULL; } static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { unsigned long uaddr = vma->vm_start; unsigned long usize = vma->vm_end - vma->vm_start; struct page **pages = __iommu_get_pages(cpu_addr, attrs); vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); if (!pages) return -ENXIO; do { int ret = vm_insert_page(vma, uaddr, 
*pages++); if (ret) { pr_err("Remapping memory failed: %d\n", ret); return ret; } uaddr += PAGE_SIZE; usize -= PAGE_SIZE; } while (usize > 0); return 0; } /* * free a page as defined by the above mapping. * Must not be called with IRQs disabled. */ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { struct page **pages = __iommu_get_pages(cpu_addr, attrs); size = PAGE_ALIGN(size); if (!pages) { WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); return; } if (__in_atomic_pool(cpu_addr, size)) { __iommu_free_atomic(dev, cpu_addr, handle, size); return; } if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { unmap_kernel_range((unsigned long)cpu_addr, size); vunmap(cpu_addr); } __iommu_remove_mapping(dev, handle, size); __iommu_free_buffer(dev, pages, size, attrs); } static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page **pages = __iommu_get_pages(cpu_addr, attrs); if (!pages) return -ENXIO; return sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL); } /* * Map a part of the scatter-gather list into contiguous io address space */ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, size_t size, dma_addr_t *handle, enum dma_data_direction dir, struct dma_attrs *attrs, bool is_coherent) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; dma_addr_t iova, iova_base; int ret = 0; unsigned int count; struct scatterlist *s; size = PAGE_ALIGN(size); *handle = DMA_ERROR_CODE; iova_base = iova = __alloc_iova(mapping, size); if (iova == DMA_ERROR_CODE) return -ENOMEM; for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { phys_addr_t phys = page_to_phys(sg_page(s)); unsigned int len = PAGE_ALIGN(s->offset + s->length); if (!is_coherent && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, 
attrs)) __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); ret = iommu_map(mapping->domain, iova, phys, len, 0); if (ret < 0) goto fail; count += len >> PAGE_SHIFT; iova += len; } *handle = iova_base; return 0; fail: iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); __free_iova(mapping, iova_base, size); return ret; } static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs, bool is_coherent) { struct scatterlist *s = sg, *dma = sg, *start = sg; int i, count = 0; unsigned int offset = s->offset; unsigned int size = s->offset + s->length; unsigned int max = dma_get_max_seg_size(dev); for (i = 1; i < nents; i++) { s = sg_next(s); s->dma_address = DMA_ERROR_CODE; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, is_coherent) < 0) goto bad_mapping; dma->dma_address += offset; dma->dma_length = size - offset; size = offset = s->offset; start = s; dma = sg_next(dma); count += 1; } size += s->length; } if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, is_coherent) < 0) goto bad_mapping; dma->dma_address += offset; dma->dma_length = size - offset; return count+1; bad_mapping: for_each_sg(sg, s, count, i) __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); return 0; } /** * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA * @dev: valid struct device pointer * @sg: list of buffers * @nents: number of buffers to map * @dir: DMA transfer direction * * Map a set of i/o coherent buffers described by scatterlist in streaming * mode for DMA. The scatter gather list elements are merged together (if * possible) and tagged with the appropriate dma address and length. They are * obtained via sg_dma_{address,length}. 
*/ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { return __iommu_map_sg(dev, sg, nents, dir, attrs, true); } /** * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA * @dev: valid struct device pointer * @sg: list of buffers * @nents: number of buffers to map * @dir: DMA transfer direction * * Map a set of buffers described by scatterlist in streaming mode for DMA. * The scatter gather list elements are merged together (if possible) and * tagged with the appropriate dma address and length. They are obtained via * sg_dma_{address,length}. */ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { return __iommu_map_sg(dev, sg, nents, dir, attrs, false); } static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs, bool is_coherent) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) { if (sg_dma_len(s)) __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); if (!is_coherent && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); } } /** * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg * @dev: valid struct device pointer * @sg: list of buffers * @nents: number of buffers to unmap (same as was passed to dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) * * Unmap a set of streaming mode DMA translations. Again, CPU access * rules concerning calls here are the same as for dma_unmap_single(). 
*/ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); } /** * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg * @dev: valid struct device pointer * @sg: list of buffers * @nents: number of buffers to unmap (same as was passed to dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) * * Unmap a set of streaming mode DMA translations. Again, CPU access * rules concerning calls here are the same as for dma_unmap_single(). */ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); } /** * arm_iommu_sync_sg_for_cpu * @dev: valid struct device pointer * @sg: list of buffers * @nents: number of buffers to map (returned from dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) */ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); } /** * arm_iommu_sync_sg_for_device * @dev: valid struct device pointer * @sg: list of buffers * @nents: number of buffers to map (returned from dma_map_sg) * @dir: DMA transfer direction (same as was passed to dma_map_sg) */ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); } /** * arm_coherent_iommu_map_page * @dev: valid struct device pointer * @page: page that buffer resides in * @offset: offset into page for start of buffer * @size: size of buffer to map * @dir: DMA transfer direction * * Coherent IOMMU aware version of 
arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
	     struct page *page, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
	if (ret < 0)
		goto fail;

	/* Restore the intra-page offset on the returned IOVA. */
	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	/* Non-coherent: flush CPU caches before handing to the device. */
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	/* A zero IOVA means the handle was never mapped; nothing to do. */
	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size:
size of buffer (same as passed to dma_map_page) * @dir: DMA transfer direction (same as passed to dma_map_page) * * IOMMU aware version of arm_dma_unmap_page() */ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; dma_addr_t iova = handle & PAGE_MASK; struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); int offset = handle & ~PAGE_MASK; int len = PAGE_ALIGN(size + offset); if (!iova) return; if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_dev_to_cpu(page, offset, size, dir); iommu_unmap(mapping->domain, iova, len); __free_iova(mapping, iova, len); } static void arm_iommu_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; dma_addr_t iova = handle & PAGE_MASK; struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); unsigned int offset = handle & ~PAGE_MASK; if (!iova) return; __dma_page_dev_to_cpu(page, offset, size, dir); } static void arm_iommu_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; dma_addr_t iova = handle & PAGE_MASK; struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); unsigned int offset = handle & ~PAGE_MASK; if (!iova) return; __dma_page_cpu_to_dev(page, offset, size, dir); } struct dma_map_ops iommu_ops = { .alloc = arm_iommu_alloc_attrs, .free = arm_iommu_free_attrs, .mmap = arm_iommu_mmap_attrs, .get_sgtable = arm_iommu_get_sgtable, .map_page = arm_iommu_map_page, .unmap_page = arm_iommu_unmap_page, .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, .sync_single_for_device = arm_iommu_sync_single_for_device, .map_sg = arm_iommu_map_sg, .unmap_sg = arm_iommu_unmap_sg, .sync_sg_for_cpu = 
arm_iommu_sync_sg_for_cpu, .sync_sg_for_device = arm_iommu_sync_sg_for_device, .set_dma_mask = arm_dma_set_mask, }; struct dma_map_ops iommu_coherent_ops = { .alloc = arm_iommu_alloc_attrs, .free = arm_iommu_free_attrs, .mmap = arm_iommu_mmap_attrs, .get_sgtable = arm_iommu_get_sgtable, .map_page = arm_coherent_iommu_map_page, .unmap_page = arm_coherent_iommu_unmap_page, .map_sg = arm_coherent_iommu_map_sg, .unmap_sg = arm_coherent_iommu_unmap_sg, .set_dma_mask = arm_dma_set_mask, }; /** * arm_iommu_create_mapping * @bus: pointer to the bus holding the client device (for IOMMU calls) * @base: start address of the valid IO address space * @size: size of the valid IO address space * @order: accuracy of the IO addresses allocations * * Creates a mapping structure which holds information about used/unused * IO address ranges, which is required to perform memory allocation and * mapping with IOMMU aware functions. * * The client device need to be attached to the mapping with * arm_iommu_attach_device function. 
*/ struct dma_iommu_mapping * arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, int order) { unsigned int count = size >> (PAGE_SHIFT + order); unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); struct dma_iommu_mapping *mapping; int err = -ENOMEM; if (!count) return ERR_PTR(-EINVAL); mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); if (!mapping) goto err; mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!mapping->bitmap) goto err2; mapping->base = base; mapping->bits = BITS_PER_BYTE * bitmap_size; mapping->order = order; spin_lock_init(&mapping->lock); mapping->domain = iommu_domain_alloc(bus); if (!mapping->domain) goto err3; kref_init(&mapping->kref); return mapping; err3: kfree(mapping->bitmap); err2: kfree(mapping); err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); static void release_iommu_mapping(struct kref *kref) { struct dma_iommu_mapping *mapping = container_of(kref, struct dma_iommu_mapping, kref); iommu_domain_free(mapping->domain); kfree(mapping->bitmap); kfree(mapping); } void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) { if (mapping) kref_put(&mapping->kref, release_iommu_mapping); } EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); /** * arm_iommu_attach_device * @dev: valid struct device pointer * @mapping: io address space mapping structure (returned from * arm_iommu_create_mapping) * * Attaches specified io address space mapping to the provided device, * this replaces the dma operations (dma_map_ops pointer) with the * IOMMU aware version. More than one client might be attached to * the same io address space mapping. 
*/ int arm_iommu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping) { int err; err = iommu_attach_device(mapping->domain, dev); if (err) return err; kref_get(&mapping->kref); dev->archdata.mapping = mapping; set_dma_ops(dev, &iommu_ops); pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); return 0; } EXPORT_SYMBOL_GPL(arm_iommu_attach_device); /** * arm_iommu_detach_device * @dev: valid struct device pointer * * Detaches the provided device from a previously attached map. * This voids the dma operations (dma_map_ops pointer) */ void arm_iommu_detach_device(struct device *dev) { struct dma_iommu_mapping *mapping; mapping = to_dma_iommu_mapping(dev); if (!mapping) { dev_warn(dev, "Not attached\n"); return; } iommu_detach_device(mapping->domain, dev); kref_put(&mapping->kref, release_iommu_mapping); mapping = NULL; set_dma_ops(dev, NULL); pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); } EXPORT_SYMBOL_GPL(arm_iommu_detach_device); #endif
gpl-2.0
CyanogenMod/geeksphone-kernel-zero
arch/x86/kernel/cpu/mtrr/cleanup.c
314
27901
/* * MTRR (Memory Type Range Register) cleanup * * Copyright (C) 2009 Yinghai Lu * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/sort.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/kvm_para.h> #include <asm/processor.h> #include <asm/e820.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" struct res_range { unsigned long start; unsigned long end; }; struct var_mtrr_range_state { unsigned long base_pfn; unsigned long size_pfn; mtrr_type type; }; struct var_mtrr_state { unsigned long range_startk; unsigned long range_sizek; unsigned long chunk_sizek; unsigned long gran_sizek; unsigned int reg; }; /* Should be related to MTRR_VAR_RANGES nums */ #define RANGE_NUM 256 static struct res_range __initdata range[RANGE_NUM]; static int __initdata nr_range; static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; static int __initdata debug_print; #define Dprintk(x...) 
do { if (debug_print) printk(KERN_DEBUG x); } while (0) static int __init add_range(struct res_range *range, int nr_range, unsigned long start, unsigned long end) { /* Out of slots: */ if (nr_range >= RANGE_NUM) return nr_range; range[nr_range].start = start; range[nr_range].end = end; nr_range++; return nr_range; } static int __init add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, unsigned long end) { int i; /* Try to merge it with old one: */ for (i = 0; i < nr_range; i++) { unsigned long final_start, final_end; unsigned long common_start, common_end; if (!range[i].end) continue; common_start = max(range[i].start, start); common_end = min(range[i].end, end); if (common_start > common_end + 1) continue; final_start = min(range[i].start, start); final_end = max(range[i].end, end); range[i].start = final_start; range[i].end = final_end; return nr_range; } /* Need to add it: */ return add_range(range, nr_range, start, end); } static void __init subtract_range(struct res_range *range, unsigned long start, unsigned long end) { int i, j; for (j = 0; j < RANGE_NUM; j++) { if (!range[j].end) continue; if (start <= range[j].start && end >= range[j].end) { range[j].start = 0; range[j].end = 0; continue; } if (start <= range[j].start && end < range[j].end && range[j].start < end + 1) { range[j].start = end + 1; continue; } if (start > range[j].start && end >= range[j].end && range[j].end > start - 1) { range[j].end = start - 1; continue; } if (start > range[j].start && end < range[j].end) { /* Find the new spare: */ for (i = 0; i < RANGE_NUM; i++) { if (range[i].end == 0) break; } if (i < RANGE_NUM) { range[i].end = range[j].end; range[i].start = end + 1; } else { printk(KERN_ERR "run of slot in ranges\n"); } range[j].end = start - 1; continue; } } } static int __init cmp_range(const void *x1, const void *x2) { const struct res_range *r1 = x1; const struct res_range *r2 = x2; long start1, start2; start1 = r1->start; start2 = r2->start; return 
start1 - start2; } #define BIOS_BUG_MSG KERN_WARNING \ "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" static int __init x86_get_mtrr_mem_range(struct res_range *range, int nr_range, unsigned long extra_remove_base, unsigned long extra_remove_size) { unsigned long base, size; mtrr_type type; int i; for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type != MTRR_TYPE_WRBACK) continue; base = range_state[i].base_pfn; size = range_state[i].size_pfn; nr_range = add_range_with_merge(range, nr_range, base, base + size - 1); } if (debug_print) { printk(KERN_DEBUG "After WB checking\n"); for (i = 0; i < nr_range; i++) printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", range[i].start, range[i].end + 1); } /* Take out UC ranges: */ for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type != MTRR_TYPE_UNCACHABLE && type != MTRR_TYPE_WRPROT) continue; size = range_state[i].size_pfn; if (!size) continue; base = range_state[i].base_pfn; if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && (mtrr_state.enabled & 1)) { /* Var MTRR contains UC entry below 1M? 
Skip it: */ printk(BIOS_BUG_MSG, i); if (base + size <= (1<<(20-PAGE_SHIFT))) continue; size -= (1<<(20-PAGE_SHIFT)) - base; base = 1<<(20-PAGE_SHIFT); } subtract_range(range, base, base + size - 1); } if (extra_remove_size) subtract_range(range, extra_remove_base, extra_remove_base + extra_remove_size - 1); /* get new range num */ nr_range = 0; for (i = 0; i < RANGE_NUM; i++) { if (!range[i].end) continue; nr_range++; } if (debug_print) { printk(KERN_DEBUG "After UC checking\n"); for (i = 0; i < nr_range; i++) printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", range[i].start, range[i].end + 1); } /* sort the ranges */ sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); if (debug_print) { printk(KERN_DEBUG "After sorting\n"); for (i = 0; i < nr_range; i++) printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", range[i].start, range[i].end + 1); } /* clear those is not used */ for (i = nr_range; i < RANGE_NUM; i++) memset(&range[i], 0, sizeof(range[i])); return nr_range; } #ifdef CONFIG_MTRR_SANITIZER static unsigned long __init sum_ranges(struct res_range *range, int nr_range) { unsigned long sum = 0; int i; for (i = 0; i < nr_range; i++) sum += range[i].end + 1 - range[i].start; return sum; } static int enable_mtrr_cleanup __initdata = CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT; static int __init disable_mtrr_cleanup_setup(char *str) { enable_mtrr_cleanup = 0; return 0; } early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); static int __init enable_mtrr_cleanup_setup(char *str) { enable_mtrr_cleanup = 1; return 0; } early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); static int __init mtrr_cleanup_debug_setup(char *str) { debug_print = 1; return 0; } early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); static void __init set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, unsigned char type, unsigned int address_bits) { u32 base_lo, base_hi, mask_lo, mask_hi; u64 base, mask; if (!sizek) { 
fill_mtrr_var_range(reg, 0, 0, 0, 0); return; } mask = (1ULL << address_bits) - 1; mask &= ~((((u64)sizek) << 10) - 1); base = ((u64)basek) << 10; base |= type; mask |= 0x800; base_lo = base & ((1ULL<<32) - 1); base_hi = base >> 32; mask_lo = mask & ((1ULL<<32) - 1); mask_hi = mask >> 32; fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi); } static void __init save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, unsigned char type) { range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); range_state[reg].type = type; } static void __init set_var_mtrr_all(unsigned int address_bits) { unsigned long basek, sizek; unsigned char type; unsigned int reg; for (reg = 0; reg < num_var_ranges; reg++) { basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10); sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10); type = range_state[reg].type; set_var_mtrr(reg, basek, sizek, type, address_bits); } } static unsigned long to_size_factor(unsigned long sizek, char *factorp) { unsigned long base = sizek; char factor; if (base & ((1<<10) - 1)) { /* Not MB-aligned: */ factor = 'K'; } else if (base & ((1<<20) - 1)) { factor = 'M'; base >>= 10; } else { factor = 'G'; base >>= 20; } *factorp = factor; return base; } static unsigned int __init range_to_mtrr(unsigned int reg, unsigned long range_startk, unsigned long range_sizek, unsigned char type) { if (!range_sizek || (reg >= num_var_ranges)) return reg; while (range_sizek) { unsigned long max_align, align; unsigned long sizek; /* Compute the maximum size with which we can make a range: */ if (range_startk) max_align = ffs(range_startk) - 1; else max_align = 32; align = fls(range_sizek) - 1; if (align > max_align) align = max_align; sizek = 1 << align; if (debug_print) { char start_factor = 'K', size_factor = 'K'; unsigned long start_base, size_base; start_base = to_size_factor(range_startk, &start_factor); size_base = to_size_factor(sizek, 
&size_factor); Dprintk("Setting variable MTRR %d, " "base: %ld%cB, range: %ld%cB, type %s\n", reg, start_base, start_factor, size_base, size_factor, (type == MTRR_TYPE_UNCACHABLE) ? "UC" : ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other") ); } save_var_mtrr(reg++, range_startk, sizek, type); range_startk += sizek; range_sizek -= sizek; if (reg >= num_var_ranges) break; } return reg; } static unsigned __init range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, unsigned long sizek) { unsigned long hole_basek, hole_sizek; unsigned long second_basek, second_sizek; unsigned long range0_basek, range0_sizek; unsigned long range_basek, range_sizek; unsigned long chunk_sizek; unsigned long gran_sizek; hole_basek = 0; hole_sizek = 0; second_basek = 0; second_sizek = 0; chunk_sizek = state->chunk_sizek; gran_sizek = state->gran_sizek; /* Align with gran size, prevent small block used up MTRRs: */ range_basek = ALIGN(state->range_startk, gran_sizek); if ((range_basek > basek) && basek) return second_sizek; state->range_sizek -= (range_basek - state->range_startk); range_sizek = ALIGN(state->range_sizek, gran_sizek); while (range_sizek > state->range_sizek) { range_sizek -= gran_sizek; if (!range_sizek) return 0; } state->range_sizek = range_sizek; /* Try to append some small hole: */ range0_basek = state->range_startk; range0_sizek = ALIGN(state->range_sizek, chunk_sizek); /* No increase: */ if (range0_sizek == state->range_sizek) { Dprintk("rangeX: %016lx - %016lx\n", range0_basek<<10, (range0_basek + state->range_sizek)<<10); state->reg = range_to_mtrr(state->reg, range0_basek, state->range_sizek, MTRR_TYPE_WRBACK); return 0; } /* Only cut back when it is not the last: */ if (sizek) { while (range0_basek + range0_sizek > (basek + sizek)) { if (range0_sizek >= chunk_sizek) range0_sizek -= chunk_sizek; else range0_sizek = 0; if (!range0_sizek) break; } } second_try: range_basek = range0_basek + range0_sizek; /* One hole in the middle: */ if (range_basek > 
basek && range_basek <= (basek + sizek)) second_sizek = range_basek - basek; if (range0_sizek > state->range_sizek) { /* One hole in middle or at the end: */ hole_sizek = range0_sizek - state->range_sizek - second_sizek; /* Hole size should be less than half of range0 size: */ if (hole_sizek >= (range0_sizek >> 1) && range0_sizek >= chunk_sizek) { range0_sizek -= chunk_sizek; second_sizek = 0; hole_sizek = 0; goto second_try; } } if (range0_sizek) { Dprintk("range0: %016lx - %016lx\n", range0_basek<<10, (range0_basek + range0_sizek)<<10); state->reg = range_to_mtrr(state->reg, range0_basek, range0_sizek, MTRR_TYPE_WRBACK); } if (range0_sizek < state->range_sizek) { /* Need to handle left over range: */ range_sizek = state->range_sizek - range0_sizek; Dprintk("range: %016lx - %016lx\n", range_basek<<10, (range_basek + range_sizek)<<10); state->reg = range_to_mtrr(state->reg, range_basek, range_sizek, MTRR_TYPE_WRBACK); } if (hole_sizek) { hole_basek = range_basek - hole_sizek - second_sizek; Dprintk("hole: %016lx - %016lx\n", hole_basek<<10, (hole_basek + hole_sizek)<<10); state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek, MTRR_TYPE_UNCACHABLE); } return second_sizek; } static void __init set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, unsigned long size_pfn) { unsigned long basek, sizek; unsigned long second_sizek = 0; if (state->reg >= num_var_ranges) return; basek = base_pfn << (PAGE_SHIFT - 10); sizek = size_pfn << (PAGE_SHIFT - 10); /* See if I can merge with the last range: */ if ((basek <= 1024) || (state->range_startk + state->range_sizek == basek)) { unsigned long endk = basek + sizek; state->range_sizek = endk - state->range_startk; return; } /* Write the range mtrrs: */ if (state->range_sizek != 0) second_sizek = range_to_mtrr_with_hole(state, basek, sizek); /* Allocate an msr: */ state->range_startk = basek + second_sizek; state->range_sizek = sizek - second_sizek; } /* Mininum size of mtrr block that can take hole: 
*/ static u64 mtrr_chunk_size __initdata = (256ULL<<20); static int __init parse_mtrr_chunk_size_opt(char *p) { if (!p) return -EINVAL; mtrr_chunk_size = memparse(p, &p); return 0; } early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); /* Granularity of mtrr of block: */ static u64 mtrr_gran_size __initdata; static int __init parse_mtrr_gran_size_opt(char *p) { if (!p) return -EINVAL; mtrr_gran_size = memparse(p, &p); return 0; } early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); static unsigned long nr_mtrr_spare_reg __initdata = CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; static int __init parse_mtrr_spare_reg(char *arg) { if (arg) nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); return 0; } early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); static int __init x86_setup_var_mtrrs(struct res_range *range, int nr_range, u64 chunk_size, u64 gran_size) { struct var_mtrr_state var_state; int num_reg; int i; var_state.range_startk = 0; var_state.range_sizek = 0; var_state.reg = 0; var_state.chunk_sizek = chunk_size >> 10; var_state.gran_sizek = gran_size >> 10; memset(range_state, 0, sizeof(range_state)); /* Write the range: */ for (i = 0; i < nr_range; i++) { set_var_mtrr_range(&var_state, range[i].start, range[i].end - range[i].start + 1); } /* Write the last range: */ if (var_state.range_sizek != 0) range_to_mtrr_with_hole(&var_state, 0, 0); num_reg = var_state.reg; /* Clear out the extra MTRR's: */ while (var_state.reg < num_var_ranges) { save_var_mtrr(var_state.reg, 0, 0, 0); var_state.reg++; } return num_reg; } struct mtrr_cleanup_result { unsigned long gran_sizek; unsigned long chunk_sizek; unsigned long lose_cover_sizek; unsigned int num_reg; int bad; }; /* * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G * chunk size: gran_size, ..., 2G * so we need (1+16)*8 */ #define NUM_RESULT 136 #define PSHIFT (PAGE_SHIFT - 10) static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; static unsigned long __initdata min_loss_pfn[RANGE_NUM]; 
static void __init print_out_mtrr_range_state(void) { char start_factor = 'K', size_factor = 'K'; unsigned long start_base, size_base; mtrr_type type; int i; for (i = 0; i < num_var_ranges; i++) { size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); if (!size_base) continue; size_base = to_size_factor(size_base, &size_factor), start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); start_base = to_size_factor(start_base, &start_factor), type = range_state[i].type; printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", i, start_base, start_factor, size_base, size_factor, (type == MTRR_TYPE_UNCACHABLE) ? "UC" : ((type == MTRR_TYPE_WRPROT) ? "WP" : ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")) ); } } static int __init mtrr_need_cleanup(void) { int i; mtrr_type type; unsigned long size; /* Extra one for all 0: */ int num[MTRR_NUM_TYPES + 1]; /* Check entries number: */ memset(num, 0, sizeof(num)); for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; size = range_state[i].size_pfn; if (type >= MTRR_NUM_TYPES) continue; if (!size) type = MTRR_NUM_TYPES; if (type == MTRR_TYPE_WRPROT) type = MTRR_TYPE_UNCACHABLE; num[type]++; } /* Check if we got UC entries: */ if (!num[MTRR_TYPE_UNCACHABLE]) return 0; /* Check if we only had WB and UC */ if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != num_var_ranges - num[MTRR_NUM_TYPES]) return 0; return 1; } static unsigned long __initdata range_sums; static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, unsigned long x_remove_base, unsigned long x_remove_size, int i) { static struct res_range range_new[RANGE_NUM]; unsigned long range_sums_new; static int nr_range_new; int num_reg; /* Convert ranges to var ranges state: */ num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); /* We got new setting in range_state, check it: */ memset(range_new, 0, sizeof(range_new)); nr_range_new = x86_get_mtrr_mem_range(range_new, 0, x_remove_base, x_remove_size); 
range_sums_new = sum_ranges(range_new, nr_range_new); result[i].chunk_sizek = chunk_size >> 10; result[i].gran_sizek = gran_size >> 10; result[i].num_reg = num_reg; if (range_sums < range_sums_new) { result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT; result[i].bad = 1; } else { result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT; } /* Double check it: */ if (!result[i].bad && !result[i].lose_cover_sizek) { if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range))) result[i].bad = 1; } if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg])) min_loss_pfn[num_reg] = range_sums - range_sums_new; } static void __init mtrr_print_out_one_result(int i) { unsigned long gran_base, chunk_base, lose_base; char gran_factor, chunk_factor, lose_factor; gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t", result[i].bad ? "*BAD*" : " ", gran_base, gran_factor, chunk_base, chunk_factor); pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n", result[i].num_reg, result[i].bad ? 
"-" : "", lose_base, lose_factor); } static int __init mtrr_search_optimal_index(void) { int num_reg_good; int index_good; int i; if (nr_mtrr_spare_reg >= num_var_ranges) nr_mtrr_spare_reg = num_var_ranges - 1; num_reg_good = -1; for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { if (!min_loss_pfn[i]) num_reg_good = i; } index_good = -1; if (num_reg_good != -1) { for (i = 0; i < NUM_RESULT; i++) { if (!result[i].bad && result[i].num_reg == num_reg_good && !result[i].lose_cover_sizek) { index_good = i; break; } } } return index_good; } int __init mtrr_cleanup(unsigned address_bits) { unsigned long x_remove_base, x_remove_size; unsigned long base, size, def, dummy; u64 chunk_size, gran_size; mtrr_type type; int index_good; int i; if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) return 0; rdmsr(MSR_MTRRdefType, def, dummy); def &= 0xff; if (def != MTRR_TYPE_UNCACHABLE) return 0; /* Get it and store it aside: */ memset(range_state, 0, sizeof(range_state)); for (i = 0; i < num_var_ranges; i++) { mtrr_if->get(i, &base, &size, &type); range_state[i].base_pfn = base; range_state[i].size_pfn = size; range_state[i].type = type; } /* Check if we need handle it and can handle it: */ if (!mtrr_need_cleanup()) return 0; /* Print original var MTRRs at first, for debugging: */ printk(KERN_DEBUG "original variable MTRRs\n"); print_out_mtrr_range_state(); memset(range, 0, sizeof(range)); x_remove_size = 0; x_remove_base = 1 << (32 - PAGE_SHIFT); if (mtrr_tom2) x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); /* * [0, 1M) should always be covered by var mtrr with WB * and fixed mtrrs should take effect before var mtrr for it: */ nr_range = add_range_with_merge(range, nr_range, 0, (1ULL<<(20 - PAGE_SHIFT)) - 1); /* Sort the ranges: */ sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); range_sums = sum_ranges(range, nr_range); printk(KERN_INFO "total RAM covered: %ldM\n", 
range_sums >> (20 - PAGE_SHIFT)); if (mtrr_chunk_size && mtrr_gran_size) { i = 0; mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, x_remove_base, x_remove_size, i); mtrr_print_out_one_result(i); if (!result[i].bad) { set_var_mtrr_all(address_bits); printk(KERN_DEBUG "New variable MTRRs\n"); print_out_mtrr_range_state(); return 1; } printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " "will find optimal one\n"); } i = 0; memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); memset(result, 0, sizeof(result)); for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) { for (chunk_size = gran_size; chunk_size < (1ULL<<32); chunk_size <<= 1) { if (i >= NUM_RESULT) continue; mtrr_calc_range_state(chunk_size, gran_size, x_remove_base, x_remove_size, i); if (debug_print) { mtrr_print_out_one_result(i); printk(KERN_INFO "\n"); } i++; } } /* Try to find the optimal index: */ index_good = mtrr_search_optimal_index(); if (index_good != -1) { printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); i = index_good; mtrr_print_out_one_result(i); /* Convert ranges to var ranges state: */ chunk_size = result[i].chunk_sizek; chunk_size <<= 10; gran_size = result[i].gran_sizek; gran_size <<= 10; x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); set_var_mtrr_all(address_bits); printk(KERN_DEBUG "New variable MTRRs\n"); print_out_mtrr_range_state(); return 1; } else { /* print out all */ for (i = 0; i < NUM_RESULT; i++) mtrr_print_out_one_result(i); } printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n"); printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n"); return 0; } #else int __init mtrr_cleanup(unsigned address_bits) { return 0; } #endif static int disable_mtrr_trim; static int __init disable_mtrr_trim_setup(char *str) { disable_mtrr_trim = 1; return 0; } early_param("disable_mtrr_trim", disable_mtrr_trim_setup); /* * Newer AMD K8s and later CPUs have a special magic MSR way to force WB * for memory >4GB. 
Check for that here. * Note this won't check if the MTRRs < 4GB where the magic bit doesn't * apply to are wrong, but so far we don't know of any such case in the wild. */ #define Tom2Enabled (1U << 21) #define Tom2ForceMemTypeWB (1U << 22) int __init amd_special_default_mtrr(void) { u32 l, h; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return 0; if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) return 0; /* In case some hypervisor doesn't pass SYSCFG through: */ if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) return 0; /* * Memory between 4GB and top of mem is forced WB by this magic bit. * Reserved before K8RevF, but should be zero there. */ if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) == (Tom2Enabled | Tom2ForceMemTypeWB)) return 1; return 0; } static u64 __init real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) { u64 trim_start, trim_size; trim_start = start_pfn; trim_start <<= PAGE_SHIFT; trim_size = limit_pfn; trim_size <<= PAGE_SHIFT; trim_size -= trim_start; return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED); } /** * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs * @end_pfn: ending page frame number * * Some buggy BIOSes don't setup the MTRRs properly for systems with certain * memory configurations. This routine checks that the highest MTRR matches * the end of memory, to make sure the MTRRs having a write back type cover * all of the memory the kernel is intending to use. If not, it'll trim any * memory off the end by adjusting end_pfn, removing it from the kernel's * allocation pools, warning the user with an obnoxious message. 
*/ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) { unsigned long i, base, size, highest_pfn = 0, def, dummy; mtrr_type type; u64 total_trim_size; /* extra one for all 0 */ int num[MTRR_NUM_TYPES + 1]; /* * Make sure we only trim uncachable memory on machines that * support the Intel MTRR architecture: */ if (!is_cpu(INTEL) || disable_mtrr_trim) return 0; rdmsr(MSR_MTRRdefType, def, dummy); def &= 0xff; if (def != MTRR_TYPE_UNCACHABLE) return 0; /* Get it and store it aside: */ memset(range_state, 0, sizeof(range_state)); for (i = 0; i < num_var_ranges; i++) { mtrr_if->get(i, &base, &size, &type); range_state[i].base_pfn = base; range_state[i].size_pfn = size; range_state[i].type = type; } /* Find highest cached pfn: */ for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type != MTRR_TYPE_WRBACK) continue; base = range_state[i].base_pfn; size = range_state[i].size_pfn; if (highest_pfn < base + size) highest_pfn = base + size; } /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ if (!highest_pfn) { printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); return 0; } /* Check entries number: */ memset(num, 0, sizeof(num)); for (i = 0; i < num_var_ranges; i++) { type = range_state[i].type; if (type >= MTRR_NUM_TYPES) continue; size = range_state[i].size_pfn; if (!size) type = MTRR_NUM_TYPES; num[type]++; } /* No entry for WB? 
*/ if (!num[MTRR_TYPE_WRBACK]) return 0; /* Check if we only had WB and UC: */ if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != num_var_ranges - num[MTRR_NUM_TYPES]) return 0; memset(range, 0, sizeof(range)); nr_range = 0; if (mtrr_tom2) { range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1; if (highest_pfn < range[nr_range].end + 1) highest_pfn = range[nr_range].end + 1; nr_range++; } nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); /* Check the head: */ total_trim_size = 0; if (range[0].start) total_trim_size += real_trim_memory(0, range[0].start); /* Check the holes: */ for (i = 0; i < nr_range - 1; i++) { if (range[i].end + 1 < range[i+1].start) total_trim_size += real_trim_memory(range[i].end + 1, range[i+1].start); } /* Check the top: */ i = nr_range - 1; if (range[i].end + 1 < end_pfn) total_trim_size += real_trim_memory(range[i].end + 1, end_pfn); if (total_trim_size) { pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20); if (!changed_by_mtrr_cleanup) WARN_ON(1); pr_info("update e820 for mtrr\n"); update_e820(); return 1; } return 0; }
gpl-2.0
cbunting99/OpenCataclysm
dep/acelite/ace/Vector_T.cpp
570
1997
// $Id: Vector_T.cpp 92069 2010-09-28 11:38:59Z johnnyw $ #ifndef ACE_VECTOR_T_CPP #define ACE_VECTOR_T_CPP #if !defined (ACE_LACKS_PRAGMA_ONCE) # pragma once #endif /* ACE_LACKS_PRAGMA_ONCE */ #include "ace/Vector_T.h" #if !defined (__ACE_INLINE__) #include "ace/Vector_T.inl" #endif /* __ACE_INLINE__ */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_ALLOC_HOOK_DEFINE(ACE_Vector) template <class T, size_t DEFAULT_SIZE> void ACE_Vector<T, DEFAULT_SIZE>::resize (const size_t new_size, const T& t) { ACE_Array<T>::size (new_size); if (new_size > length_) for (size_t i = length_; i < new_size; ++i) (*this)[i]=t; curr_max_size_ = this->max_size (); length_ = new_size; } template <class T, size_t DEFAULT_SIZE> void ACE_Vector<T, DEFAULT_SIZE>::push_back (const T& elem) { if (length_ == curr_max_size_) { ACE_Array<T>::size (curr_max_size_ * 2); curr_max_size_ = this->max_size (); } else ACE_Array<T>::size (length_ + 1); ++length_; (*this)[length_-1] = elem; } template <class T, size_t DEFAULT_SIZE> void ACE_Vector<T, DEFAULT_SIZE>::dump (void) const { } // Compare this vector with <s> for equality. template <class T, size_t DEFAULT_SIZE> bool ACE_Vector<T, DEFAULT_SIZE>::operator== (const ACE_Vector<T, DEFAULT_SIZE> &s) const { if (this == &s) return true; else if (this->size () != s.size ()) return false; const size_t len = s.size (); for (size_t slot = 0; slot < len; ++slot) if ((*this)[slot] != s[slot]) return false; return true; } // **************************************************************** template <class T, size_t DEFAULT_SIZE> int ACE_Vector_Iterator<T, DEFAULT_SIZE>::next (T *&item) { // ACE_TRACE ("ACE_Vector_Iterator<T>::next"); if (this->done ()) { item = 0; return 0; } else { item = &vector_[current_]; return 1; } } ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_VECTOR_T_CPP */
gpl-2.0
eckucukoglu/sober-kernel
drivers/iio/industrialio-buffer.c
826
24083
/* The industrial I/O core * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Handling of buffer allocation / resizing. * * * Things to look at here. * - Better memory allocation techniques? * - Alternative access techniques? */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/cdev.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/iio/iio.h> #include "iio_core.h" #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> static const char * const iio_endian_prefix[] = { [IIO_BE] = "be", [IIO_LE] = "le", }; static bool iio_buffer_is_active(struct iio_dev *indio_dev, struct iio_buffer *buf) { struct list_head *p; list_for_each(p, &indio_dev->buffer_list) if (p == &buf->buffer_list) return true; return false; } /** * iio_buffer_read_first_n_outer() - chrdev read for buffer access * * This function relies on all buffer implementations having an * iio_buffer as their first element. **/ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, size_t n, loff_t *f_ps) { struct iio_dev *indio_dev = filp->private_data; struct iio_buffer *rb = indio_dev->buffer; if (!rb || !rb->access->read_first_n) return -EINVAL; return rb->access->read_first_n(rb, n, buf); } /** * iio_buffer_poll() - poll the buffer to find out if it has data */ unsigned int iio_buffer_poll(struct file *filp, struct poll_table_struct *wait) { struct iio_dev *indio_dev = filp->private_data; struct iio_buffer *rb = indio_dev->buffer; poll_wait(filp, &rb->pollq, wait); if (rb->stufftoread) return POLLIN | POLLRDNORM; /* need a way of knowing if there may be enough data... 
*/ return 0; } void iio_buffer_init(struct iio_buffer *buffer) { INIT_LIST_HEAD(&buffer->demux_list); init_waitqueue_head(&buffer->pollq); } EXPORT_SYMBOL(iio_buffer_init); static ssize_t iio_show_scan_index(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); } static ssize_t iio_show_fixed_type(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); u8 type = this_attr->c->scan_type.endianness; if (type == IIO_CPU) { #ifdef __LITTLE_ENDIAN type = IIO_LE; #else type = IIO_BE; #endif } return sprintf(buf, "%s:%c%d/%d>>%u\n", iio_endian_prefix[type], this_attr->c->scan_type.sign, this_attr->c->scan_type.realbits, this_attr->c->scan_type.storagebits, this_attr->c->scan_type.shift); } static ssize_t iio_scan_el_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; struct iio_dev *indio_dev = dev_to_iio_dev(dev); /* Ensure ret is 0 or 1. 
*/ ret = !!test_bit(to_iio_dev_attr(attr)->address, indio_dev->buffer->scan_mask); return sprintf(buf, "%d\n", ret); } static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) { clear_bit(bit, buffer->scan_mask); return 0; } static ssize_t iio_scan_el_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int ret; bool state; struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_buffer *buffer = indio_dev->buffer; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = strtobool(buf, &state); if (ret < 0) return ret; mutex_lock(&indio_dev->mlock); if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) { ret = -EBUSY; goto error_ret; } ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); if (ret < 0) goto error_ret; if (!state && ret) { ret = iio_scan_mask_clear(buffer, this_attr->address); if (ret) goto error_ret; } else if (state && !ret) { ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); if (ret) goto error_ret; } error_ret: mutex_unlock(&indio_dev->mlock); return ret < 0 ? ret : len; } static ssize_t iio_scan_el_ts_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp); } static ssize_t iio_scan_el_ts_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int ret; struct iio_dev *indio_dev = dev_to_iio_dev(dev); bool state; ret = strtobool(buf, &state); if (ret < 0) return ret; mutex_lock(&indio_dev->mlock); if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) { ret = -EBUSY; goto error_ret; } indio_dev->buffer->scan_timestamp = state; error_ret: mutex_unlock(&indio_dev->mlock); return ret ? 
ret : len; } static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, const struct iio_chan_spec *chan) { int ret, attrcount = 0; struct iio_buffer *buffer = indio_dev->buffer; ret = __iio_add_chan_devattr("index", chan, &iio_show_scan_index, NULL, 0, 0, &indio_dev->dev, &buffer->scan_el_dev_attr_list); if (ret) goto error_ret; attrcount++; ret = __iio_add_chan_devattr("type", chan, &iio_show_fixed_type, NULL, 0, 0, &indio_dev->dev, &buffer->scan_el_dev_attr_list); if (ret) goto error_ret; attrcount++; if (chan->type != IIO_TIMESTAMP) ret = __iio_add_chan_devattr("en", chan, &iio_scan_el_show, &iio_scan_el_store, chan->scan_index, 0, &indio_dev->dev, &buffer->scan_el_dev_attr_list); else ret = __iio_add_chan_devattr("en", chan, &iio_scan_el_ts_show, &iio_scan_el_ts_store, chan->scan_index, 0, &indio_dev->dev, &buffer->scan_el_dev_attr_list); attrcount++; ret = attrcount; error_ret: return ret; } static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev, struct iio_dev_attr *p) { kfree(p->dev_attr.attr.name); kfree(p); } static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev) { struct iio_dev_attr *p, *n; struct iio_buffer *buffer = indio_dev->buffer; list_for_each_entry_safe(p, n, &buffer->scan_el_dev_attr_list, l) iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p); } static const char * const iio_scan_elements_group_name = "scan_elements"; int iio_buffer_register(struct iio_dev *indio_dev, const struct iio_chan_spec *channels, int num_channels) { struct iio_dev_attr *p; struct attribute **attr; struct iio_buffer *buffer = indio_dev->buffer; int ret, i, attrn, attrcount, attrcount_orig = 0; if (buffer->attrs) indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs; if (buffer->scan_el_attrs != NULL) { attr = buffer->scan_el_attrs->attrs; while (*attr++ != NULL) attrcount_orig++; } attrcount = attrcount_orig; INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list); if (channels) { /* new magic */ for (i = 0; i < num_channels; 
i++) { if (channels[i].scan_index < 0) continue; /* Establish necessary mask length */ if (channels[i].scan_index > (int)indio_dev->masklength - 1) indio_dev->masklength = channels[i].scan_index + 1; ret = iio_buffer_add_channel_sysfs(indio_dev, &channels[i]); if (ret < 0) goto error_cleanup_dynamic; attrcount += ret; if (channels[i].type == IIO_TIMESTAMP) indio_dev->scan_index_timestamp = channels[i].scan_index; } if (indio_dev->masklength && buffer->scan_mask == NULL) { buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(*buffer->scan_mask), GFP_KERNEL); if (buffer->scan_mask == NULL) { ret = -ENOMEM; goto error_cleanup_dynamic; } } } buffer->scan_el_group.name = iio_scan_elements_group_name; buffer->scan_el_group.attrs = kcalloc(attrcount + 1, sizeof(buffer->scan_el_group.attrs[0]), GFP_KERNEL); if (buffer->scan_el_group.attrs == NULL) { ret = -ENOMEM; goto error_free_scan_mask; } if (buffer->scan_el_attrs) memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs, sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig); attrn = attrcount_orig; list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l) buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr; indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group; return 0; error_free_scan_mask: kfree(buffer->scan_mask); error_cleanup_dynamic: __iio_buffer_attr_cleanup(indio_dev); return ret; } EXPORT_SYMBOL(iio_buffer_register); void iio_buffer_unregister(struct iio_dev *indio_dev) { kfree(indio_dev->buffer->scan_mask); kfree(indio_dev->buffer->scan_el_group.attrs); __iio_buffer_attr_cleanup(indio_dev); } EXPORT_SYMBOL(iio_buffer_unregister); ssize_t iio_buffer_read_length(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_buffer *buffer = indio_dev->buffer; if (buffer->access->get_length) return sprintf(buf, "%d\n", buffer->access->get_length(buffer)); return 0; } EXPORT_SYMBOL(iio_buffer_read_length); 
ssize_t iio_buffer_write_length(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_buffer *buffer = indio_dev->buffer; unsigned int val; int ret; ret = kstrtouint(buf, 10, &val); if (ret) return ret; if (buffer->access->get_length) if (val == buffer->access->get_length(buffer)) return len; mutex_lock(&indio_dev->mlock); if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) { ret = -EBUSY; } else { if (buffer->access->set_length) buffer->access->set_length(buffer, val); ret = 0; } mutex_unlock(&indio_dev->mlock); return ret ? ret : len; } EXPORT_SYMBOL(iio_buffer_write_length); ssize_t iio_buffer_show_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev, indio_dev->buffer)); } EXPORT_SYMBOL(iio_buffer_show_enable); /* note NULL used as error indicator as it doesn't make sense. */ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks, unsigned int masklength, const unsigned long *mask) { if (bitmap_empty(mask, masklength)) return NULL; while (*av_masks) { if (bitmap_subset(mask, av_masks, masklength)) return av_masks; av_masks += BITS_TO_LONGS(masklength); } return NULL; } static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask, bool timestamp) { const struct iio_chan_spec *ch; unsigned bytes = 0; int length, i; /* How much space will the demuxed element take? 
*/ for_each_set_bit(i, mask, indio_dev->masklength) { ch = iio_find_channel_from_si(indio_dev, i); length = ch->scan_type.storagebits / 8; bytes = ALIGN(bytes, length); bytes += length; } if (timestamp) { ch = iio_find_channel_from_si(indio_dev, indio_dev->scan_index_timestamp); length = ch->scan_type.storagebits / 8; bytes = ALIGN(bytes, length); bytes += length; } return bytes; } int iio_update_buffers(struct iio_dev *indio_dev, struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer) { int ret; int success = 0; struct iio_buffer *buffer; unsigned long *compound_mask; const unsigned long *old_mask; /* Wind down existing buffers - iff there are any */ if (!list_empty(&indio_dev->buffer_list)) { if (indio_dev->setup_ops->predisable) { ret = indio_dev->setup_ops->predisable(indio_dev); if (ret) goto error_ret; } indio_dev->currentmode = INDIO_DIRECT_MODE; if (indio_dev->setup_ops->postdisable) { ret = indio_dev->setup_ops->postdisable(indio_dev); if (ret) goto error_ret; } } /* Keep a copy of current setup to allow roll back */ old_mask = indio_dev->active_scan_mask; if (!indio_dev->available_scan_masks) indio_dev->active_scan_mask = NULL; if (remove_buffer) list_del(&remove_buffer->buffer_list); if (insert_buffer) list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list); /* If no buffers in list, we are done */ if (list_empty(&indio_dev->buffer_list)) { indio_dev->currentmode = INDIO_DIRECT_MODE; if (indio_dev->available_scan_masks == NULL) kfree(old_mask); return 0; } /* What scan mask do we actually have ?*/ compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long), GFP_KERNEL); if (compound_mask == NULL) { if (indio_dev->available_scan_masks == NULL) kfree(old_mask); return -ENOMEM; } indio_dev->scan_timestamp = 0; list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { bitmap_or(compound_mask, compound_mask, buffer->scan_mask, indio_dev->masklength); indio_dev->scan_timestamp |= buffer->scan_timestamp; } if 
(indio_dev->available_scan_masks) { indio_dev->active_scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks, indio_dev->masklength, compound_mask); if (indio_dev->active_scan_mask == NULL) { /* * Roll back. * Note can only occur when adding a buffer. */ list_del(&insert_buffer->buffer_list); indio_dev->active_scan_mask = old_mask; success = -EINVAL; } } else { indio_dev->active_scan_mask = compound_mask; } iio_update_demux(indio_dev); /* Wind up again */ if (indio_dev->setup_ops->preenable) { ret = indio_dev->setup_ops->preenable(indio_dev); if (ret) { printk(KERN_ERR "Buffer not started:" "buffer preenable failed\n"); goto error_remove_inserted; } } indio_dev->scan_bytes = iio_compute_scan_bytes(indio_dev, indio_dev->active_scan_mask, indio_dev->scan_timestamp); list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) if (buffer->access->request_update) { ret = buffer->access->request_update(buffer); if (ret) { printk(KERN_INFO "Buffer not started:" "buffer parameter update failed\n"); goto error_run_postdisable; } } if (indio_dev->info->update_scan_mode) { ret = indio_dev->info ->update_scan_mode(indio_dev, indio_dev->active_scan_mask); if (ret < 0) { printk(KERN_INFO "update scan mode failed\n"); goto error_run_postdisable; } } /* Definitely possible for devices to support both of these.*/ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) { if (!indio_dev->trig) { printk(KERN_INFO "Buffer not started: no trigger\n"); ret = -EINVAL; /* Can only occur on first buffer */ goto error_run_postdisable; } indio_dev->currentmode = INDIO_BUFFER_TRIGGERED; } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) { indio_dev->currentmode = INDIO_BUFFER_HARDWARE; } else { /* should never be reached */ ret = -EINVAL; goto error_run_postdisable; } if (indio_dev->setup_ops->postenable) { ret = indio_dev->setup_ops->postenable(indio_dev); if (ret) { printk(KERN_INFO "Buffer not started: postenable failed\n"); indio_dev->currentmode = INDIO_DIRECT_MODE; if 
(indio_dev->setup_ops->postdisable) indio_dev->setup_ops->postdisable(indio_dev); goto error_disable_all_buffers; } } if (indio_dev->available_scan_masks) kfree(compound_mask); else kfree(old_mask); return success; error_disable_all_buffers: indio_dev->currentmode = INDIO_DIRECT_MODE; error_run_postdisable: if (indio_dev->setup_ops->postdisable) indio_dev->setup_ops->postdisable(indio_dev); error_remove_inserted: if (insert_buffer) list_del(&insert_buffer->buffer_list); indio_dev->active_scan_mask = old_mask; kfree(compound_mask); error_ret: return ret; } EXPORT_SYMBOL_GPL(iio_update_buffers); ssize_t iio_buffer_store_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int ret; bool requested_state; struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_buffer *pbuf = indio_dev->buffer; bool inlist; ret = strtobool(buf, &requested_state); if (ret < 0) return ret; mutex_lock(&indio_dev->mlock); /* Find out if it is in the list */ inlist = iio_buffer_is_active(indio_dev, pbuf); /* Already in desired state */ if (inlist == requested_state) goto done; if (requested_state) ret = iio_update_buffers(indio_dev, indio_dev->buffer, NULL); else ret = iio_update_buffers(indio_dev, NULL, indio_dev->buffer); if (ret < 0) goto done; done: mutex_unlock(&indio_dev->mlock); return (ret < 0) ? 
ret : len; } EXPORT_SYMBOL(iio_buffer_store_enable); int iio_sw_buffer_preenable(struct iio_dev *indio_dev) { struct iio_buffer *buffer; unsigned bytes; dev_dbg(&indio_dev->dev, "%s\n", __func__); list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) if (buffer->access->set_bytes_per_datum) { bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask, buffer->scan_timestamp); buffer->access->set_bytes_per_datum(buffer, bytes); } return 0; } EXPORT_SYMBOL(iio_sw_buffer_preenable); /** * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected * @indio_dev: the iio device * @mask: scan mask to be checked * * Return true if exactly one bit is set in the scan mask, false otherwise. It * can be used for devices where only one channel can be active for sampling at * a time. */ bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, const unsigned long *mask) { return bitmap_weight(mask, indio_dev->masklength) == 1; } EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot); static bool iio_validate_scan_mask(struct iio_dev *indio_dev, const unsigned long *mask) { if (!indio_dev->setup_ops->validate_scan_mask) return true; return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask); } /** * iio_scan_mask_set() - set particular bit in the scan mask * @buffer: the buffer whose scan mask we are interested in * @bit: the bit to be set. * * Note that at this point we have no way of knowing what other * buffers might request, hence this code only verifies that the * individual buffers request is plausible. 
*/ int iio_scan_mask_set(struct iio_dev *indio_dev, struct iio_buffer *buffer, int bit) { const unsigned long *mask; unsigned long *trialmask; trialmask = kmalloc(sizeof(*trialmask)* BITS_TO_LONGS(indio_dev->masklength), GFP_KERNEL); if (trialmask == NULL) return -ENOMEM; if (!indio_dev->masklength) { WARN_ON("trying to set scanmask prior to registering buffer\n"); goto err_invalid_mask; } bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); set_bit(bit, trialmask); if (!iio_validate_scan_mask(indio_dev, trialmask)) goto err_invalid_mask; if (indio_dev->available_scan_masks) { mask = iio_scan_mask_match(indio_dev->available_scan_masks, indio_dev->masklength, trialmask); if (!mask) goto err_invalid_mask; } bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); kfree(trialmask); return 0; err_invalid_mask: kfree(trialmask); return -EINVAL; } EXPORT_SYMBOL_GPL(iio_scan_mask_set); int iio_scan_mask_query(struct iio_dev *indio_dev, struct iio_buffer *buffer, int bit) { if (bit > indio_dev->masklength) return -EINVAL; if (!buffer->scan_mask) return 0; /* Ensure return value is 0 or 1. 
*/ return !!test_bit(bit, buffer->scan_mask); }; EXPORT_SYMBOL_GPL(iio_scan_mask_query); /** * struct iio_demux_table() - table describing demux memcpy ops * @from: index to copy from * @to: index to copy to * @length: how many bytes to copy * @l: list head used for management */ struct iio_demux_table { unsigned from; unsigned to; unsigned length; struct list_head l; }; static unsigned char *iio_demux(struct iio_buffer *buffer, unsigned char *datain) { struct iio_demux_table *t; if (list_empty(&buffer->demux_list)) return datain; list_for_each_entry(t, &buffer->demux_list, l) memcpy(buffer->demux_bounce + t->to, datain + t->from, t->length); return buffer->demux_bounce; } static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data) { unsigned char *dataout = iio_demux(buffer, data); return buffer->access->store_to(buffer, dataout); } static void iio_buffer_demux_free(struct iio_buffer *buffer) { struct iio_demux_table *p, *q; list_for_each_entry_safe(p, q, &buffer->demux_list, l) { list_del(&p->l); kfree(p); } } int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data) { int ret; struct iio_buffer *buf; list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) { ret = iio_push_to_buffer(buf, data); if (ret < 0) return ret; } return 0; } EXPORT_SYMBOL_GPL(iio_push_to_buffers); static int iio_buffer_update_demux(struct iio_dev *indio_dev, struct iio_buffer *buffer) { const struct iio_chan_spec *ch; int ret, in_ind = -1, out_ind, length; unsigned in_loc = 0, out_loc = 0; struct iio_demux_table *p; /* Clear out any old demux */ iio_buffer_demux_free(buffer); kfree(buffer->demux_bounce); buffer->demux_bounce = NULL; /* First work out which scan mode we will actually have */ if (bitmap_equal(indio_dev->active_scan_mask, buffer->scan_mask, indio_dev->masklength)) return 0; /* Now we have the two masks, work from least sig and build up sizes */ for_each_set_bit(out_ind, buffer->scan_mask, indio_dev->masklength) { in_ind = 
find_next_bit(indio_dev->active_scan_mask, indio_dev->masklength, in_ind + 1); while (in_ind != out_ind) { in_ind = find_next_bit(indio_dev->active_scan_mask, indio_dev->masklength, in_ind + 1); ch = iio_find_channel_from_si(indio_dev, in_ind); length = ch->scan_type.storagebits/8; /* Make sure we are aligned */ in_loc += length; if (in_loc % length) in_loc += length - in_loc % length; } p = kmalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) { ret = -ENOMEM; goto error_clear_mux_table; } ch = iio_find_channel_from_si(indio_dev, in_ind); length = ch->scan_type.storagebits/8; if (out_loc % length) out_loc += length - out_loc % length; if (in_loc % length) in_loc += length - in_loc % length; p->from = in_loc; p->to = out_loc; p->length = length; list_add_tail(&p->l, &buffer->demux_list); out_loc += length; in_loc += length; } /* Relies on scan_timestamp being last */ if (buffer->scan_timestamp) { p = kmalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) { ret = -ENOMEM; goto error_clear_mux_table; } ch = iio_find_channel_from_si(indio_dev, indio_dev->scan_index_timestamp); length = ch->scan_type.storagebits/8; if (out_loc % length) out_loc += length - out_loc % length; if (in_loc % length) in_loc += length - in_loc % length; p->from = in_loc; p->to = out_loc; p->length = length; list_add_tail(&p->l, &buffer->demux_list); out_loc += length; in_loc += length; } buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); if (buffer->demux_bounce == NULL) { ret = -ENOMEM; goto error_clear_mux_table; } return 0; error_clear_mux_table: iio_buffer_demux_free(buffer); return ret; } int iio_update_demux(struct iio_dev *indio_dev) { struct iio_buffer *buffer; int ret; list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { ret = iio_buffer_update_demux(indio_dev, buffer); if (ret < 0) goto error_clear_mux_table; } return 0; error_clear_mux_table: list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) iio_buffer_demux_free(buffer); return ret; } 
EXPORT_SYMBOL_GPL(iio_update_demux);
gpl-2.0
Hax-on/android_kernel_zte_msm8996
arch/arm/mach-vexpress/spc.c
826
14327
/* * Versatile Express Serial Power Controller (SPC) support * * Copyright (C) 2013 ARM Ltd. * * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> * Achin Gupta <achin.gupta@arm.com> * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/cpu.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/slab.h> #include <linux/semaphore.h> #include <asm/cacheflush.h> #define SPCLOG "vexpress-spc: " #define PERF_LVL_A15 0x00 #define PERF_REQ_A15 0x04 #define PERF_LVL_A7 0x08 #define PERF_REQ_A7 0x0c #define COMMS 0x10 #define COMMS_REQ 0x14 #define PWC_STATUS 0x18 #define PWC_FLAG 0x1c /* SPC wake-up IRQs status and mask */ #define WAKE_INT_MASK 0x24 #define WAKE_INT_RAW 0x28 #define WAKE_INT_STAT 0x2c /* SPC power down registers */ #define A15_PWRDN_EN 0x30 #define A7_PWRDN_EN 0x34 /* SPC per-CPU mailboxes */ #define A15_BX_ADDR0 0x68 #define A7_BX_ADDR0 0x78 /* SPC CPU/cluster reset statue */ #define STANDBYWFI_STAT 0x3c #define STANDBYWFI_STAT_A15_CPU_MASK(cpu) (1 << (cpu)) #define STANDBYWFI_STAT_A7_CPU_MASK(cpu) (1 << (3 + (cpu))) /* SPC system config interface registers */ #define SYSCFG_WDATA 0x70 #define SYSCFG_RDATA 0x74 /* A15/A7 OPP virtual register base */ #define A15_PERFVAL_BASE 0xC10 #define A7_PERFVAL_BASE 0xC30 /* Config interface control bits */ #define SYSCFG_START (1 << 31) #define SYSCFG_SCC (6 << 20) #define SYSCFG_STAT 
(14 << 20) /* wake-up interrupt masks */ #define GBL_WAKEUP_INT_MSK (0x3 << 10) /* TC2 static dual-cluster configuration */ #define MAX_CLUSTERS 2 /* * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS * operation, the operation could start just before jiffie is about * to be incremented. So setting timeout value of 20ms = 2jiffies@100Hz */ #define TIMEOUT_US 20000 #define MAX_OPPS 8 #define CA15_DVFS 0 #define CA7_DVFS 1 #define SPC_SYS_CFG 2 #define STAT_COMPLETE(type) ((1 << 0) << (type << 2)) #define STAT_ERR(type) ((1 << 1) << (type << 2)) #define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type)) struct ve_spc_opp { unsigned long freq; unsigned long u_volt; }; struct ve_spc_drvdata { void __iomem *baseaddr; /* * A15s cluster identifier * It corresponds to A15 processors MPIDR[15:8] bitfield */ u32 a15_clusid; uint32_t cur_rsp_mask; uint32_t cur_rsp_stat; struct semaphore sem; struct completion done; struct ve_spc_opp *opps[MAX_CLUSTERS]; int num_opps[MAX_CLUSTERS]; }; static struct ve_spc_drvdata *info; static inline bool cluster_is_a15(u32 cluster) { return cluster == info->a15_clusid; } /** * ve_spc_global_wakeup_irq() * * Function to set/clear global wakeup IRQs. Not protected by locking since * it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity. * * @set: if true, global wake-up IRQs are set, if false they are cleared */ void ve_spc_global_wakeup_irq(bool set) { u32 reg; reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); if (set) reg |= GBL_WAKEUP_INT_MSK; else reg &= ~GBL_WAKEUP_INT_MSK; writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); } /** * ve_spc_cpu_wakeup_irq() * * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since * it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity. 
* * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @cpu: mpidr[7:0] bitfield describing cpu affinity level * @set: if true, wake-up IRQs are set, if false they are cleared */ void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) { u32 mask, reg; if (cluster >= MAX_CLUSTERS) return; mask = 1 << cpu; if (!cluster_is_a15(cluster)) mask <<= 4; reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); if (set) reg |= mask; else reg &= ~mask; writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); } /** * ve_spc_set_resume_addr() - set the jump address used for warm boot * * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @cpu: mpidr[7:0] bitfield describing cpu affinity level * @addr: physical resume address */ void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr) { void __iomem *baseaddr; if (cluster >= MAX_CLUSTERS) return; if (cluster_is_a15(cluster)) baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2); else baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2); writel_relaxed(addr, baseaddr); } /** * ve_spc_powerdown() * * Function to enable/disable cluster powerdown. Not protected by locking * since it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity. * * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @enable: if true enables powerdown, if false disables it */ void ve_spc_powerdown(u32 cluster, bool enable) { u32 pwdrn_reg; if (cluster >= MAX_CLUSTERS) return; pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN; writel_relaxed(enable, info->baseaddr + pwdrn_reg); } static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster) { return cluster_is_a15(cluster) ? 
STANDBYWFI_STAT_A15_CPU_MASK(cpu) :
		STANDBYWFI_STAT_A7_CPU_MASK(cpu);
}

/**
 * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
 *
 * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 *
 * @return: non-zero if and only if the specified CPU is in WFI
 *
 * Take care when interpreting the result of this function: a CPU might
 * be in WFI temporarily due to idle, and is not necessarily safely
 * parked.
 */
int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
{
	int ret;
	u32 mask = standbywfi_cpu_mask(cpu, cluster);

	/* Out-of-range cluster: report "in WFI" (i.e. treat as parked) */
	if (cluster >= MAX_CLUSTERS)
		return 1;

	ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);

	pr_debug("%s: PCFGREG[0x%X] = 0x%08X, mask = 0x%X\n",
		 __func__, STANDBYWFI_STAT, ret, mask);

	return ret & mask;
}

/*
 * Read the current performance level of @cluster from the SPC and
 * translate it to a frequency via the per-cluster OPP table.
 * Returns 0 on success, -EINVAL if the hardware reports an index
 * beyond the populated OPP entries.
 */
static int ve_spc_get_performance(int cluster, u32 *freq)
{
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 perf_cfg_reg = 0;
	u32 perf;

	perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;

	perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
	if (perf >= info->num_opps[cluster])
		return -EINVAL;

	opps += perf;
	*freq = opps->freq;

	return 0;
}

/* find closest match to given frequency in OPP table */
static int ve_spc_round_performance(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 fmin = 0, fmax = ~0, ftmp;

	freq /= 1000; /* OPP entries in kHz */
	for (idx = 0; idx < max_opp; idx++, opps++) {
		ftmp = opps->freq;
		if (ftmp >= freq) {
			/* candidate for smallest OPP >= requested freq */
			if (ftmp <= fmax)
				fmax = ftmp;
		} else {
			/* candidate for largest OPP < requested freq */
			if (ftmp >= fmin)
				fmin = ftmp;
		}
	}
	/* prefer rounding up; fall back to rounding down (result in Hz) */
	if (fmax != ~0)
		return fmax * 1000;
	else
		return fmin * 1000;
}

/*
 * Exact-match lookup of @freq (kHz) in @cluster's OPP table.
 * Returns the OPP index, or -EINVAL if no entry matches exactly.
 */
static int ve_spc_find_performance_index(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++)
		if (opps->freq == freq)
			break;
	return (idx == max_opp) ? -EINVAL : idx;
}

/*
 * Wait for the SPC to signal completion of a previously issued request.
 * Returns 0 when the expected STAT_COMPLETE bit for @req_type is set,
 * -ETIMEDOUT on timeout, -EIO if the SPC completed with an error status,
 * or a negative value if the wait was interrupted.
 */
static int ve_spc_waitforcompletion(int req_type)
{
	int ret = wait_for_completion_interruptible_timeout(
			&info->done, usecs_to_jiffies(TIMEOUT_US));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
	return ret;
}

/*
 * Request a DVFS transition of @cluster to @freq (kHz).
 * Serialised against other SPC commands via info->sem; the IRQ handler
 * completes info->done when the SPC acknowledges.
 */
static int ve_spc_set_performance(int cluster, u32 freq)
{
	u32 perf_cfg_reg, perf_stat_reg;
	int ret, perf, req_type;

	if (cluster_is_a15(cluster)) {
		req_type = CA15_DVFS;
		perf_cfg_reg = PERF_LVL_A15;
		perf_stat_reg = PERF_REQ_A15;
	} else {
		req_type = CA7_DVFS;
		perf_cfg_reg = PERF_LVL_A7;
		perf_stat_reg = PERF_REQ_A7;
	}
	/* NOTE(review): perf_stat_reg is assigned but never read here */

	perf = ve_spc_find_performance_index(cluster, freq);

	if (perf < 0)
		return perf;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(req_type);

	/* writing the level kicks off the DVFS request in hardware */
	writel(perf, info->baseaddr + perf_cfg_reg);
	ret = ve_spc_waitforcompletion(req_type);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

/*
 * Read one 32-bit word from the serial configuration interface.
 * @func:   SYSCFG function select bits
 * @offset: byte offset, converted to a word offset for the COMMS register
 * @data:   out parameter, valid only when 0 is returned
 */
static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
{
	int ret;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);

	/* Set the control value */
	writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
	ret = ve_spc_waitforcompletion(SPC_SYS_CFG);

	if (ret == 0)
		*data = readl(info->baseaddr + SYSCFG_RDATA);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

/*
 * SPC interrupt: latch the response status and wake the waiter,
 * but only when a response is actually expected (cur_rsp_mask set).
 */
static irqreturn_t ve_spc_irq_handler(int irq, void *data)
{
	struct ve_spc_drvdata *drv_data = data;
	uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);

	if (info->cur_rsp_mask & status) {
		info->cur_rsp_stat = status;
		complete(&drv_data->done);
	}

	return IRQ_HANDLED;
}

/*
 * Layout of one SCC performance value word:
 *  +--------------------------+
 *  | 31 20 | 19 0 |
 *  +--------------------------+
 *  | m_volt | freq(kHz) |
 *  +--------------------------+
 */
#define MULT_FACTOR	20
#define VOLT_SHIFT	20
#define FREQ_MASK	(0xFFFF F)

/*
 * Read up to MAX_OPPS performance values for @cluster from the SCC and
 * build the in-memory OPP table (freq in kHz, voltage in uV).
 * Stops at the first read failure; num_opps records how many were read.
 * NOTE(review): ret is declared uint32_t but receives negative error
 * codes from ve_spc_read_sys_cfg() and is returned as int — works, but
 * the type is misleading.
 */
static int ve_spc_populate_opps(uint32_t cluster)
{
	uint32_t data = 0, off, ret, idx;
	struct ve_spc_opp *opps;

	opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
	if (!opps)
		return -ENOMEM;

	info->opps[cluster] = opps;

	off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
	for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
		ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
		if (!ret) {
			opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
			opps->u_volt = (data >> VOLT_SHIFT) * 1000;
		} else {
			break;
		}
	}
	info->num_opps[cluster] = idx;

	return ret;
}

/*
 * Register this CPU's cluster OPPs with the PM OPP framework.
 * Frequencies are stored in kHz internally, hence the * 1000 to Hz.
 */
static int ve_init_opp_table(struct device *cpu_dev)
{
	int cluster;
	int idx, ret = 0, max_opp;
	struct ve_spc_opp *opps;

	cluster = topology_physical_package_id(cpu_dev->id);
	cluster = cluster < 0 ? 0 : cluster; /* clamp unknown topology to 0 */

	max_opp = info->num_opps[cluster];
	opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++) {
		ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
		if (ret) {
			dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
				 opps->freq, opps->u_volt);
			return ret;
		}
	}
	return ret;
}

/*
 * One-time SPC driver initialisation.
 * @baseaddr:   ioremapped SPC register base
 * @a15_clusid: cluster id of the A15 cluster
 * @irq:        SPC interrupt line (must be > 0)
 * Allocates the global driver info, installs the IRQ handler and makes
 * the data visible to non-coherent observers.
 */
int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
	int ret;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pr_err(SPCLOG "unable to allocate mem\n");
		return -ENOMEM;
	}

	info->baseaddr = baseaddr;
	info->a15_clusid = a15_clusid;

	if (irq <= 0) {
		pr_err(SPCLOG "Invalid IRQ %d\n", irq);
		kfree(info);
		return -EINVAL;
	}

	init_completion(&info->done);

	/* dummy read to clear any stale pending status */
	readl_relaxed(info->baseaddr + PWC_STATUS);

	ret = request_irq(irq, ve_spc_irq_handler,
			  IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
			  "vexpress-spc", info);
	if (ret) {
		pr_err(SPCLOG "IRQ %d request failed\n", irq);
		kfree(info);
		return -ENODEV;
	}

	sema_init(&info->sem, 1);
	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure driver info reaches main
	 * memory.
	 */
	sync_cache_w(info);	/* flush the structure itself */
	sync_cache_w(&info);	/* flush the pointer to it */

	return 0;
}

/* Common-clock-framework wrapper: one clk per cluster */
struct clk_spc {
	struct clk_hw hw;
	int cluster;
};

#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)

static unsigned long spc_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);
	u32 freq;

	/* NOTE(review): -EIO is returned through an unsigned rate here */
	if (ve_spc_get_performance(spc->cluster, &freq))
		return -EIO;

	return freq * 1000; /* kHz -> Hz */
}

static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long *parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_round_performance(spc->cluster, drate);
}

static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_set_performance(spc->cluster, rate / 1000);
}

static struct clk_ops clk_spc_ops = {
	.recalc_rate = spc_recalc_rate,
	.round_rate = spc_round_rate,
	.set_rate = spc_set_rate,
};

/* Register one SPC clock for @cpu_dev's cluster with the CCF */
static struct clk *ve_spc_clk_register(struct device *cpu_dev)
{
	struct clk_init_data init;
	struct clk_spc *spc;

	spc = kzalloc(sizeof(*spc), GFP_KERNEL);
	if (!spc) {
		pr_err("could not allocate spc clk\n");
		return ERR_PTR(-ENOMEM);
	}

	spc->hw.init = &init;
	spc->cluster = topology_physical_package_id(cpu_dev->id);

	spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;

	init.name = dev_name(cpu_dev);
	init.ops = &clk_spc_ops;
	init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
	init.num_parents = 0;

	return devm_clk_register(cpu_dev, &spc->hw);
}

/*
 * Late init: build the OPP tables for both clusters, then register a
 * per-CPU clock + clkdev lookup + OPP table, and finally spawn the
 * cpufreq platform device. Per-CPU failures are logged and skipped.
 */
static int __init ve_spc_clk_init(void)
{
	int cpu;
	struct clk *clk;

	if (!info)
		return 0; /* Continue only if SPC is initialised */

	if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
		pr_err("failed to build OPP table\n");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_warn("failed to get cpu%d device\n", cpu);
			continue;
		}
		clk = ve_spc_clk_register(cpu_dev);
		if (IS_ERR(clk)) {
			pr_warn("failed to register cpu%d clock\n", cpu);
			continue;
		}
		if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
			pr_warn("failed to register cpu%d clock lookup\n", cpu);
			continue;
		}

		if (ve_init_opp_table(cpu_dev))
			pr_warn("failed to initialise cpu%d opp table\n", cpu);
	}

	platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
	return 0;
}
module_init(ve_spc_clk_init);
gpl-2.0
romracer/sgs2sr-kernel
drivers/media/common/saa7146_hlp.c
1594
30652
#include <linux/kernel.h> #include <media/saa7146_vv.h> static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format) { /* clear out the necessary bits */ *clip_format &= 0x0000ffff; /* set these bits new */ *clip_format |= (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16)); } static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl) { *hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28); *hps_ctrl |= (source << 30) | (sync << 28); } static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl) { int hyo = 0, hxo = 0; hyo = vv->standard->v_offset; hxo = vv->standard->h_offset; *hps_h_scale &= ~(MASK_B0 | 0xf00); *hps_h_scale |= (hxo << 0); *hps_ctrl &= ~(MASK_W0 | MASK_B2); *hps_ctrl |= (hyo << 12); } /* helper functions for the calculation of the horizontal- and vertical scaling registers, clip-format-register etc ... these functions take pointers to the (most-likely read-out original-values) and manipulate them according to the requested changes. 
*/ /* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */ static struct { u16 hps_coeff; u16 weight_sum; } hps_h_coeff_tab [] = { {0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8}, {0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8}, {0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16} }; /* table of attenuation values for horizontal scaling */ static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0}; /* calculate horizontal scale registers */ static int calculate_h_scale_registers(struct saa7146_dev *dev, int in_x, int out_x, int flip_lr, u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale) { /* horizontal prescaler */ u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0; /* horizontal scaler */ u32 xim = 0, xp = 0, xsci =0; /* vertical scale & gain */ u32 pfuv = 0; /* helper variables */ u32 h_atten = 0, i = 0; if ( 0 == out_x ) { return -EINVAL; } /* mask out vanity-bit */ *hps_ctrl &= ~MASK_29; /* calculate prescale-(xspc)-value: [n .. 1/2) : 1 [1/2 .. 1/3) : 2 [1/3 .. 1/4) : 3 ... 
*/ if (in_x > out_x) { xpsc = in_x / out_x; } else { /* zooming */ xpsc = 1; } /* if flip_lr-bit is set, number of pixels after horizontal prescaling must be < 384 */ if ( 0 != flip_lr ) { /* set vanity bit */ *hps_ctrl |= MASK_29; while (in_x / xpsc >= 384 ) xpsc++; } /* if zooming is wanted, number of pixels after horizontal prescaling must be < 768 */ else { while ( in_x / xpsc >= 768 ) xpsc++; } /* maximum prescale is 64 (p.69) */ if ( xpsc > 64 ) xpsc = 64; /* keep xacm clear*/ xacm = 0; /* set horizontal filter parameters (CXY = CXUV) */ cxy = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].hps_coeff; cxuv = cxy; /* calculate and set horizontal fine scale (xsci) */ /* bypass the horizontal scaler ? */ if ( (in_x == out_x) && ( 1 == xpsc ) ) xsci = 0x400; else xsci = ( (1024 * in_x) / (out_x * xpsc) ) + xpsc; /* set start phase for horizontal fine scale (xp) to 0 */ xp = 0; /* set xim, if we bypass the horizontal scaler */ if ( 0x400 == xsci ) xim = 1; else xim = 0; /* if the prescaler is bypassed, enable horizontal accumulation mode (xacm) and clear dcgx */ if( 1 == xpsc ) { xacm = 1; dcgx = 0; } else { xacm = 0; /* get best match in the table of attenuations for horizontal scaling */ h_atten = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].weight_sum; for (i = 0; h_attenuation[i] != 0; i++) { if (h_attenuation[i] >= h_atten) break; } dcgx = i; } /* the horizontal scaling increment controls the UV filter to reduce the bandwidth to improve the display quality, so set it ... 
*/ if ( xsci == 0x400) pfuv = 0x00; else if ( xsci < 0x600) pfuv = 0x01; else if ( xsci < 0x680) pfuv = 0x11; else if ( xsci < 0x700) pfuv = 0x22; else pfuv = 0x33; *hps_v_gain &= MASK_W0|MASK_B2; *hps_v_gain |= (pfuv << 24); *hps_h_scale &= ~(MASK_W1 | 0xf000); *hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12); *hps_h_prescale |= (dcgx << 27) | ((xpsc-1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0); return 0; } static struct { u16 hps_coeff; u16 weight_sum; } hps_v_coeff_tab [] = { {0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8}, {0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16}, {0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16}, {0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32}, {0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32}, {0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32}, {0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64}, {0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64}, {0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64}, {0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128} }; /* table of attenuation values for vertical scaling */ static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0}; /* calculate vertical scale registers */ static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field, int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain) { int lpi = 0; /* vertical scaling */ u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0; /* vertical scale & gain */ u32 dcgy = 0, cya_cyb = 0; /* helper variables */ u32 v_atten = 0, i = 0; /* error, if vertical zooming */ if ( in_y < out_y ) { return -EINVAL; } /* linear phase interpolation may be 
used if scaling is between 1 and 1/2 (both fields used) or scaling is between 1/2 and 1/4 (if only one field is used) */ if (V4L2_FIELD_HAS_BOTH(field)) { if( 2*out_y >= in_y) { lpi = 1; } } else if (field == V4L2_FIELD_TOP || field == V4L2_FIELD_ALTERNATE || field == V4L2_FIELD_BOTTOM) { if( 4*out_y >= in_y ) { lpi = 1; } out_y *= 2; } if( 0 != lpi ) { yacm = 0; yacl = 0; cya_cyb = 0x00ff; /* calculate scaling increment */ if ( in_y > out_y ) ysci = ((1024 * in_y) / (out_y + 1)) - 1024; else ysci = 0; dcgy = 0; /* calculate ype and ypo */ ype = ysci / 16; ypo = ype + (ysci / 64); } else { yacm = 1; /* calculate scaling increment */ ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10; /* calculate ype and ypo */ ypo = ype = ((ysci + 15) / 16); /* the sequence length interval (yacl) has to be set according to the prescale value, e.g. [n .. 1/2) : 0 [1/2 .. 1/3) : 1 [1/3 .. 1/4) : 2 ... */ if ( ysci < 512) { yacl = 0; } else { yacl = ( ysci / (1024 - ysci) ); } /* get filter coefficients for cya, cyb from table hps_v_coeff_tab */ cya_cyb = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].hps_coeff; /* get best match in the table of attenuations for vertical scaling */ v_atten = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].weight_sum; for (i = 0; v_attenuation[i] != 0; i++) { if (v_attenuation[i] >= v_atten) break; } dcgy = i; } /* ypo and ype swapped in spec ? 
*/ *hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8 ) | (ype << 1); *hps_v_gain &= ~(MASK_W0|MASK_B2); *hps_v_gain |= (dcgy << 16) | (cya_cyb << 0); return 0; } /* simple bubble-sort algorithm with duplicate elimination */ static int sort_and_eliminate(u32* values, int* count) { int low = 0, high = 0, top = 0, temp = 0; int cur = 0, next = 0; /* sanity checks */ if( (0 > *count) || (NULL == values) ) { return -EINVAL; } /* bubble sort the first @count items of the array @values */ for( top = *count; top > 0; top--) { for( low = 0, high = 1; high < top; low++, high++) { if( values[low] > values[high] ) { temp = values[low]; values[low] = values[high]; values[high] = temp; } } } /* remove duplicate items */ for( cur = 0, next = 1; next < *count; next++) { if( values[cur] != values[next]) values[++cur] = values[next]; } *count = cur + 1; return 0; } static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct saa7146_fh *fh, struct saa7146_video_dma *vdma2, u32* clip_format, u32* arbtr_ctrl, enum v4l2_field field) { struct saa7146_vv *vv = dev->vv_data; __le32 *clipping = vv->d_clipping.cpu_addr; int width = fh->ov.win.w.width; int height = fh->ov.win.w.height; int clipcount = fh->ov.nclips; u32 line_list[32]; u32 pixel_list[32]; int numdwords = 0; int i = 0, j = 0; int cnt_line = 0, cnt_pixel = 0; int x[32], y[32], w[32], h[32]; /* clear out memory */ memset(&line_list[0], 0x00, sizeof(u32)*32); memset(&pixel_list[0], 0x00, sizeof(u32)*32); memset(clipping, 0x00, SAA7146_CLIPPING_MEM); /* fill the line and pixel-lists */ for(i = 0; i < clipcount; i++) { int l = 0, r = 0, t = 0, b = 0; x[i] = fh->ov.clips[i].c.left; y[i] = fh->ov.clips[i].c.top; w[i] = fh->ov.clips[i].c.width; h[i] = fh->ov.clips[i].c.height; if( w[i] < 0) { x[i] += w[i]; w[i] = -w[i]; } if( h[i] < 0) { y[i] += h[i]; h[i] = -h[i]; } if( x[i] < 0) { w[i] += x[i]; x[i] = 0; } if( y[i] < 0) { h[i] += y[i]; y[i] = 0; } if( 0 != vv->vflip ) { y[i] = height - y[i] - 
h[i]; } l = x[i]; r = x[i]+w[i]; t = y[i]; b = y[i]+h[i]; /* insert left/right coordinates */ pixel_list[ 2*i ] = min_t(int, l, width); pixel_list[(2*i)+1] = min_t(int, r, width); /* insert top/bottom coordinates */ line_list[ 2*i ] = min_t(int, t, height); line_list[(2*i)+1] = min_t(int, b, height); } /* sort and eliminate lists */ cnt_line = cnt_pixel = 2*clipcount; sort_and_eliminate( &pixel_list[0], &cnt_pixel ); sort_and_eliminate( &line_list[0], &cnt_line ); /* calculate the number of used u32s */ numdwords = max_t(int, (cnt_line+1), (cnt_pixel+1))*2; numdwords = max_t(int, 4, numdwords); numdwords = min_t(int, 64, numdwords); /* fill up cliptable */ for(i = 0; i < cnt_pixel; i++) { clipping[2*i] |= cpu_to_le32(pixel_list[i] << 16); } for(i = 0; i < cnt_line; i++) { clipping[(2*i)+1] |= cpu_to_le32(line_list[i] << 16); } /* fill up cliptable with the display infos */ for(j = 0; j < clipcount; j++) { for(i = 0; i < cnt_pixel; i++) { if( x[j] < 0) x[j] = 0; if( pixel_list[i] < (x[j] + w[j])) { if ( pixel_list[i] >= x[j] ) { clipping[2*i] |= cpu_to_le32(1 << j); } } } for(i = 0; i < cnt_line; i++) { if( y[j] < 0) y[j] = 0; if( line_list[i] < (y[j] + h[j]) ) { if( line_list[i] >= y[j] ) { clipping[(2*i)+1] |= cpu_to_le32(1 << j); } } } } /* adjust arbitration control register */ *arbtr_ctrl &= 0xffff00ff; *arbtr_ctrl |= 0x00001c00; vdma2->base_even = vv->d_clipping.dma_handle; vdma2->base_odd = vv->d_clipping.dma_handle; vdma2->prot_addr = vv->d_clipping.dma_handle+((sizeof(u32))*(numdwords)); vdma2->base_page = 0x04; vdma2->pitch = 0x00; vdma2->num_line_byte = (0 << 16 | (sizeof(u32))*(numdwords-1) ); /* set clipping-mode. 
this depends on the field(s) used */ *clip_format &= 0xfffffff7; if (V4L2_FIELD_HAS_BOTH(field)) { *clip_format |= 0x00000008; } else { *clip_format |= 0x00000000; } } /* disable clipping */ static void saa7146_disable_clipping(struct saa7146_dev *dev) { u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL); /* mask out relevant bits (=lower word)*/ clip_format &= MASK_W1; /* upload clipping-registers*/ saa7146_write(dev, CLIP_FORMAT_CTRL,clip_format); saa7146_write(dev, MC2, (MASK_05 | MASK_21)); /* disable video dma2 */ saa7146_write(dev, MC1, MASK_21); } static void saa7146_set_clipping_rect(struct saa7146_fh *fh) { struct saa7146_dev *dev = fh->dev; enum v4l2_field field = fh->ov.win.field; struct saa7146_video_dma vdma2; u32 clip_format; u32 arbtr_ctrl; /* check clipcount, disable clipping if clipcount == 0*/ if( fh->ov.nclips == 0 ) { saa7146_disable_clipping(dev); return; } clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL); arbtr_ctrl = saa7146_read(dev, PCI_BT_V1); calculate_clipping_registers_rect(dev, fh, &vdma2, &clip_format, &arbtr_ctrl, field); /* set clipping format */ clip_format &= 0xffff0008; clip_format |= (SAA7146_CLIPPING_RECT << 4); /* prepare video dma2 */ saa7146_write(dev, BASE_EVEN2, vdma2.base_even); saa7146_write(dev, BASE_ODD2, vdma2.base_odd); saa7146_write(dev, PROT_ADDR2, vdma2.prot_addr); saa7146_write(dev, BASE_PAGE2, vdma2.base_page); saa7146_write(dev, PITCH2, vdma2.pitch); saa7146_write(dev, NUM_LINE_BYTE2, vdma2.num_line_byte); /* prepare the rest */ saa7146_write(dev, CLIP_FORMAT_CTRL,clip_format); saa7146_write(dev, PCI_BT_V1, arbtr_ctrl); /* upload clip_control-register, clipping-registers, enable video dma2 */ saa7146_write(dev, MC2, (MASK_05 | MASK_21 | MASK_03 | MASK_19)); saa7146_write(dev, MC1, (MASK_05 | MASK_21)); } static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field) { struct saa7146_vv *vv = dev->vv_data; int source = vv->current_hps_source; int sync = 
vv->current_hps_sync; u32 hps_v_scale = 0, hps_v_gain = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0; /* set vertical scale */ hps_v_scale = 0; /* all bits get set by the function-call */ hps_v_gain = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN);*/ calculate_v_scale_registers(dev, field, vv->standard->v_field*2, height, &hps_v_scale, &hps_v_gain); /* set horizontal scale */ hps_ctrl = 0; hps_h_prescale = 0; /* all bits get set in the function */ hps_h_scale = 0; calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale); /* set hyo and hxo */ calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl); calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl); /* write out new register contents */ saa7146_write(dev, HPS_V_SCALE, hps_v_scale); saa7146_write(dev, HPS_V_GAIN, hps_v_gain); saa7146_write(dev, HPS_CTRL, hps_ctrl); saa7146_write(dev, HPS_H_PRESCALE,hps_h_prescale); saa7146_write(dev, HPS_H_SCALE, hps_h_scale); /* upload shadow-ram registers */ saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22) ); } /* calculate the new memory offsets for a desired position */ static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat) { struct saa7146_vv *vv = dev->vv_data; struct saa7146_format *sfmt = format_by_fourcc(dev, pixelformat); int b_depth = vv->ov_fmt->depth; int b_bpl = vv->ov_fb.fmt.bytesperline; /* The unsigned long cast is to remove a 64-bit compile warning since it looks like a 64-bit address is cast to a 32-bit value, even though the base pointer is really a 32-bit physical address that goes into a 32-bit DMA register. FIXME: might not work on some 64-bit platforms, but see the FIXME in struct v4l2_framebuffer (videodev2.h) for that. 
*/ u32 base = (u32)(unsigned long)vv->ov_fb.base; struct saa7146_video_dma vdma1; /* calculate memory offsets for picture, look if we shall top-down-flip */ vdma1.pitch = 2*b_bpl; if ( 0 == vv->vflip ) { vdma1.base_even = base + (w_y * (vdma1.pitch/2)) + (w_x * (b_depth / 8)); vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2); vdma1.prot_addr = vdma1.base_even + (w_height * (vdma1.pitch / 2)); } else { vdma1.base_even = base + ((w_y+w_height) * (vdma1.pitch/2)) + (w_x * (b_depth / 8)); vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2); vdma1.prot_addr = vdma1.base_odd - (w_height * (vdma1.pitch / 2)); } if (V4L2_FIELD_HAS_BOTH(field)) { } else if (field == V4L2_FIELD_ALTERNATE) { /* fixme */ vdma1.base_odd = vdma1.prot_addr; vdma1.pitch /= 2; } else if (field == V4L2_FIELD_TOP) { vdma1.base_odd = vdma1.prot_addr; vdma1.pitch /= 2; } else if (field == V4L2_FIELD_BOTTOM) { vdma1.base_odd = vdma1.base_even; vdma1.base_even = vdma1.prot_addr; vdma1.pitch /= 2; } if ( 0 != vv->vflip ) { vdma1.pitch *= -1; } vdma1.base_page = sfmt->swap; vdma1.num_line_byte = (vv->standard->v_field<<16)+vv->standard->h_pixels; saa7146_write_out_dma(dev, 1, &vdma1); } static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette) { u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL); /* call helper function */ calculate_output_format_register(dev,palette,&clip_format); /* update the hps registers */ saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format); saa7146_write(dev, MC2, (MASK_05 | MASK_21)); } /* select input-source */ void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync) { struct saa7146_vv *vv = dev->vv_data; u32 hps_ctrl = 0; /* read old state */ hps_ctrl = saa7146_read(dev, HPS_CTRL); hps_ctrl &= ~( MASK_31 | MASK_30 | MASK_28 ); hps_ctrl |= (source << 30) | (sync << 28); /* write back & upload register */ saa7146_write(dev, HPS_CTRL, hps_ctrl); saa7146_write(dev, MC2, (MASK_05 | MASK_21)); vv->current_hps_source = 
source; vv->current_hps_sync = sync; } EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync); int saa7146_enable_overlay(struct saa7146_fh *fh) { struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; saa7146_set_window(dev, fh->ov.win.w.width, fh->ov.win.w.height, fh->ov.win.field); saa7146_set_position(dev, fh->ov.win.w.left, fh->ov.win.w.top, fh->ov.win.w.height, fh->ov.win.field, vv->ov_fmt->pixelformat); saa7146_set_output_format(dev, vv->ov_fmt->trans); saa7146_set_clipping_rect(fh); /* enable video dma1 */ saa7146_write(dev, MC1, (MASK_06 | MASK_22)); return 0; } void saa7146_disable_overlay(struct saa7146_fh *fh) { struct saa7146_dev *dev = fh->dev; /* disable clipping + video dma1 */ saa7146_disable_clipping(dev); saa7146_write(dev, MC1, MASK_22); } void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma) { int where = 0; if( which < 1 || which > 3) { return; } /* calculate starting address */ where = (which-1)*0x18; saa7146_write(dev, where, vdma->base_odd); saa7146_write(dev, where+0x04, vdma->base_even); saa7146_write(dev, where+0x08, vdma->prot_addr); saa7146_write(dev, where+0x0c, vdma->pitch); saa7146_write(dev, where+0x10, vdma->base_page); saa7146_write(dev, where+0x14, vdma->num_line_byte); /* upload */ saa7146_write(dev, MC2, (MASK_02<<(which-1))|(MASK_18<<(which-1))); /* printk("vdma%d.base_even: 0x%08x\n", which,vdma->base_even); printk("vdma%d.base_odd: 0x%08x\n", which,vdma->base_odd); printk("vdma%d.prot_addr: 0x%08x\n", which,vdma->prot_addr); printk("vdma%d.base_page: 0x%08x\n", which,vdma->base_page); printk("vdma%d.pitch: 0x%08x\n", which,vdma->pitch); printk("vdma%d.num_line_byte: 0x%08x\n", which,vdma->num_line_byte); */ } static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf) { struct saa7146_vv *vv = dev->vv_data; struct saa7146_video_dma vdma1; struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); int width = 
buf->fmt->width; int height = buf->fmt->height; int bytesperline = buf->fmt->bytesperline; enum v4l2_field field = buf->fmt->field; int depth = sfmt->depth; DEB_CAP(("[size=%dx%d,fields=%s]\n", width,height,v4l2_field_names[field])); if( bytesperline != 0) { vdma1.pitch = bytesperline*2; } else { vdma1.pitch = (width*depth*2)/8; } vdma1.num_line_byte = ((vv->standard->v_field<<16) + vv->standard->h_pixels); vdma1.base_page = buf->pt[0].dma | ME1 | sfmt->swap; if( 0 != vv->vflip ) { vdma1.prot_addr = buf->pt[0].offset; vdma1.base_even = buf->pt[0].offset+(vdma1.pitch/2)*height; vdma1.base_odd = vdma1.base_even - (vdma1.pitch/2); } else { vdma1.base_even = buf->pt[0].offset; vdma1.base_odd = vdma1.base_even + (vdma1.pitch/2); vdma1.prot_addr = buf->pt[0].offset+(vdma1.pitch/2)*height; } if (V4L2_FIELD_HAS_BOTH(field)) { } else if (field == V4L2_FIELD_ALTERNATE) { /* fixme */ if ( vv->last_field == V4L2_FIELD_TOP ) { vdma1.base_odd = vdma1.prot_addr; vdma1.pitch /= 2; } else if ( vv->last_field == V4L2_FIELD_BOTTOM ) { vdma1.base_odd = vdma1.base_even; vdma1.base_even = vdma1.prot_addr; vdma1.pitch /= 2; } } else if (field == V4L2_FIELD_TOP) { vdma1.base_odd = vdma1.prot_addr; vdma1.pitch /= 2; } else if (field == V4L2_FIELD_BOTTOM) { vdma1.base_odd = vdma1.base_even; vdma1.base_even = vdma1.prot_addr; vdma1.pitch /= 2; } if( 0 != vv->vflip ) { vdma1.pitch *= -1; } saa7146_write_out_dma(dev, 1, &vdma1); return 0; } static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3) { int height = buf->fmt->height; int width = buf->fmt->width; vdma2->pitch = width; vdma3->pitch = width; /* fixme: look at bytesperline! 
*/ if( 0 != vv->vflip ) { vdma2->prot_addr = buf->pt[1].offset; vdma2->base_even = ((vdma2->pitch/2)*height)+buf->pt[1].offset; vdma2->base_odd = vdma2->base_even - (vdma2->pitch/2); vdma3->prot_addr = buf->pt[2].offset; vdma3->base_even = ((vdma3->pitch/2)*height)+buf->pt[2].offset; vdma3->base_odd = vdma3->base_even - (vdma3->pitch/2); } else { vdma3->base_even = buf->pt[2].offset; vdma3->base_odd = vdma3->base_even + (vdma3->pitch/2); vdma3->prot_addr = (vdma3->pitch/2)*height+buf->pt[2].offset; vdma2->base_even = buf->pt[1].offset; vdma2->base_odd = vdma2->base_even + (vdma2->pitch/2); vdma2->prot_addr = (vdma2->pitch/2)*height+buf->pt[1].offset; } return 0; } static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3) { int height = buf->fmt->height; int width = buf->fmt->width; vdma2->pitch = width/2; vdma3->pitch = width/2; if( 0 != vv->vflip ) { vdma2->prot_addr = buf->pt[2].offset; vdma2->base_even = ((vdma2->pitch/2)*height)+buf->pt[2].offset; vdma2->base_odd = vdma2->base_even - (vdma2->pitch/2); vdma3->prot_addr = buf->pt[1].offset; vdma3->base_even = ((vdma3->pitch/2)*height)+buf->pt[1].offset; vdma3->base_odd = vdma3->base_even - (vdma3->pitch/2); } else { vdma3->base_even = buf->pt[2].offset; vdma3->base_odd = vdma3->base_even + (vdma3->pitch); vdma3->prot_addr = (vdma3->pitch/2)*height+buf->pt[2].offset; vdma2->base_even = buf->pt[1].offset; vdma2->base_odd = vdma2->base_even + (vdma2->pitch); vdma2->prot_addr = (vdma2->pitch/2)*height+buf->pt[1].offset; } return 0; } static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf) { struct saa7146_vv *vv = dev->vv_data; struct saa7146_video_dma vdma1; struct saa7146_video_dma vdma2; struct saa7146_video_dma vdma3; struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); int width = buf->fmt->width; int height = buf->fmt->height; enum v4l2_field field = buf->fmt->field; 
BUG_ON(0 == buf->pt[0].dma); BUG_ON(0 == buf->pt[1].dma); BUG_ON(0 == buf->pt[2].dma); DEB_CAP(("[size=%dx%d,fields=%s]\n", width,height,v4l2_field_names[field])); /* fixme: look at bytesperline! */ /* fixme: what happens for user space buffers here?. The offsets are most likely wrong, this version here only works for page-aligned buffers, modifications to the pagetable-functions are necessary...*/ vdma1.pitch = width*2; vdma1.num_line_byte = ((vv->standard->v_field<<16) + vv->standard->h_pixels); vdma1.base_page = buf->pt[0].dma | ME1; if( 0 != vv->vflip ) { vdma1.prot_addr = buf->pt[0].offset; vdma1.base_even = ((vdma1.pitch/2)*height)+buf->pt[0].offset; vdma1.base_odd = vdma1.base_even - (vdma1.pitch/2); } else { vdma1.base_even = buf->pt[0].offset; vdma1.base_odd = vdma1.base_even + (vdma1.pitch/2); vdma1.prot_addr = (vdma1.pitch/2)*height+buf->pt[0].offset; } vdma2.num_line_byte = 0; /* unused */ vdma2.base_page = buf->pt[1].dma | ME1; vdma3.num_line_byte = 0; /* unused */ vdma3.base_page = buf->pt[2].dma | ME1; switch( sfmt->depth ) { case 12: { calc_planar_420(vv,buf,&vdma2,&vdma3); break; } case 16: { calc_planar_422(vv,buf,&vdma2,&vdma3); break; } default: { return -1; } } if (V4L2_FIELD_HAS_BOTH(field)) { } else if (field == V4L2_FIELD_ALTERNATE) { /* fixme */ vdma1.base_odd = vdma1.prot_addr; vdma1.pitch /= 2; vdma2.base_odd = vdma2.prot_addr; vdma2.pitch /= 2; vdma3.base_odd = vdma3.prot_addr; vdma3.pitch /= 2; } else if (field == V4L2_FIELD_TOP) { vdma1.base_odd = vdma1.prot_addr; vdma1.pitch /= 2; vdma2.base_odd = vdma2.prot_addr; vdma2.pitch /= 2; vdma3.base_odd = vdma3.prot_addr; vdma3.pitch /= 2; } else if (field == V4L2_FIELD_BOTTOM) { vdma1.base_odd = vdma1.base_even; vdma1.base_even = vdma1.prot_addr; vdma1.pitch /= 2; vdma2.base_odd = vdma2.base_even; vdma2.base_even = vdma2.prot_addr; vdma2.pitch /= 2; vdma3.base_odd = vdma3.base_even; vdma3.base_even = vdma3.prot_addr; vdma3.pitch /= 2; } if( 0 != vv->vflip ) { vdma1.pitch *= -1; vdma2.pitch 
*= -1; vdma3.pitch *= -1; } saa7146_write_out_dma(dev, 1, &vdma1); if( (sfmt->flags & FORMAT_BYTE_SWAP) != 0 ) { saa7146_write_out_dma(dev, 3, &vdma2); saa7146_write_out_dma(dev, 2, &vdma3); } else { saa7146_write_out_dma(dev, 2, &vdma2); saa7146_write_out_dma(dev, 3, &vdma3); } return 0; } static void program_capture_engine(struct saa7146_dev *dev, int planar) { struct saa7146_vv *vv = dev->vv_data; int count = 0; unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B; unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B; /* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set*/ WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait); WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait); /* set rps register 0 */ WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2/4)); WRITE_RPS0(MASK_27 | MASK_11); /* turn on video-dma1 */ WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); WRITE_RPS0(MASK_06 | MASK_22); /* => mask */ WRITE_RPS0(MASK_06 | MASK_22); /* => values */ if( 0 != planar ) { /* turn on video-dma2 */ WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); WRITE_RPS0(MASK_05 | MASK_21); /* => mask */ WRITE_RPS0(MASK_05 | MASK_21); /* => values */ /* turn on video-dma3 */ WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); WRITE_RPS0(MASK_04 | MASK_20); /* => mask */ WRITE_RPS0(MASK_04 | MASK_20); /* => values */ } /* wait for o_fid_a/b / e_fid_a/b toggle */ if ( vv->last_field == V4L2_FIELD_INTERLACED ) { WRITE_RPS0(CMD_PAUSE | o_wait); WRITE_RPS0(CMD_PAUSE | e_wait); } else if ( vv->last_field == V4L2_FIELD_TOP ) { WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09)); WRITE_RPS0(CMD_PAUSE | o_wait); } else if ( vv->last_field == V4L2_FIELD_BOTTOM ) { WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? 
MASK_10 : MASK_09));
		WRITE_RPS0(CMD_PAUSE | e_wait);
	}

	/* turn off video-dma1 */
	WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS0(MASK_22 | MASK_06);	/* => mask */
	WRITE_RPS0(MASK_22);		/* => values */

	if( 0 != planar ) {
		/* turn off video-dma2 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_05 | MASK_21);	/* => mask */
		WRITE_RPS0(MASK_21);		/* => values */

		/* turn off video-dma3 */
		WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4));
		WRITE_RPS0(MASK_04 | MASK_20);	/* => mask */
		WRITE_RPS0(MASK_20);		/* => values */
	}

	/* generate interrupt */
	WRITE_RPS0(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS0(CMD_STOP);
}

/*
 * Arm the capture engine for one buffer: program window, output format
 * and DMA registers, then build and start the RPS capture program.
 * For ALTERNATE-field capture, last_field is toggled so successive
 * buffers grab opposite fields.
 */
void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat);
	struct saa7146_vv *vv = dev->vv_data;
	u32 vdma1_prot_addr;

	DEB_CAP(("buf:%p, next:%p\n",buf,next));

	vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1);
	if( 0 == vdma1_prot_addr ) {
		/* clear out beginning of streaming bit (rps register 0)*/
		DEB_CAP(("forcing sync to new frame\n"));
		saa7146_write(dev, MC2, MASK_27 );
	}

	saa7146_set_window(dev, buf->fmt->width, buf->fmt->height, buf->fmt->field);
	saa7146_set_output_format(dev, sfmt->trans);
	saa7146_disable_clipping(dev);

	/* alternate-field capture: flip the field we wait for next */
	if ( vv->last_field == V4L2_FIELD_INTERLACED ) {
	} else if ( vv->last_field == V4L2_FIELD_TOP ) {
		vv->last_field = V4L2_FIELD_BOTTOM;
	} else if ( vv->last_field == V4L2_FIELD_BOTTOM ) {
		vv->last_field = V4L2_FIELD_TOP;
	}

	if( 0 != IS_PLANAR(sfmt->trans)) {
		calculate_video_dma_grab_planar(dev, buf);
		program_capture_engine(dev,1);
	} else {
		calculate_video_dma_grab_packed(dev, buf);
		program_capture_engine(dev,0);
	}

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));
}
gpl-2.0
invisiblek/kernel_808l
fs/proc/proc_tty.c
1594
4831
/* * proc_tty.c -- handles /proc/tty * * Copyright 1997, Theodore Ts'o */ #include <asm/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/tty.h> #include <linux/seq_file.h> #include <linux/bitops.h> /* * The /proc/tty directory inodes... */ static struct proc_dir_entry *proc_tty_ldisc, *proc_tty_driver; /* * This is the handler for /proc/tty/drivers */ static void show_tty_range(struct seq_file *m, struct tty_driver *p, dev_t from, int num) { seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown"); seq_printf(m, "/dev/%-8s ", p->name); if (p->num > 1) { seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from), MINOR(from) + num - 1); } else { seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from)); } switch (p->type) { case TTY_DRIVER_TYPE_SYSTEM: seq_printf(m, "system"); if (p->subtype == SYSTEM_TYPE_TTY) seq_printf(m, ":/dev/tty"); else if (p->subtype == SYSTEM_TYPE_SYSCONS) seq_printf(m, ":console"); else if (p->subtype == SYSTEM_TYPE_CONSOLE) seq_printf(m, ":vtmaster"); break; case TTY_DRIVER_TYPE_CONSOLE: seq_printf(m, "console"); break; case TTY_DRIVER_TYPE_SERIAL: seq_printf(m, "serial"); break; case TTY_DRIVER_TYPE_PTY: if (p->subtype == PTY_TYPE_MASTER) seq_printf(m, "pty:master"); else if (p->subtype == PTY_TYPE_SLAVE) seq_printf(m, "pty:slave"); else seq_printf(m, "pty"); break; default: seq_printf(m, "type:%d.%d", p->type, p->subtype); } seq_putc(m, '\n'); } static int show_tty_driver(struct seq_file *m, void *v) { struct tty_driver *p = list_entry(v, struct tty_driver, tty_drivers); dev_t from = MKDEV(p->major, p->minor_start); dev_t to = from + p->num; if (&p->tty_drivers == tty_drivers.next) { /* pseudo-drivers first */ seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0); seq_printf(m, "system:/dev/tty\n"); seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console"); 
seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1); seq_printf(m, "system:console\n"); #ifdef CONFIG_UNIX98_PTYS seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2); seq_printf(m, "system\n"); #endif #ifdef CONFIG_VT seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0"); seq_printf(m, "%3d %7d ", TTY_MAJOR, 0); seq_printf(m, "system:vtmaster\n"); #endif } while (MAJOR(from) < MAJOR(to)) { dev_t next = MKDEV(MAJOR(from)+1, 0); show_tty_range(m, p, from, next - from); from = next; } if (from != to) show_tty_range(m, p, from, to - from); return 0; } /* iterator */ static void *t_start(struct seq_file *m, loff_t *pos) { mutex_lock(&tty_mutex); return seq_list_start(&tty_drivers, *pos); } static void *t_next(struct seq_file *m, void *v, loff_t *pos) { return seq_list_next(v, &tty_drivers, pos); } static void t_stop(struct seq_file *m, void *v) { mutex_unlock(&tty_mutex); } static const struct seq_operations tty_drivers_op = { .start = t_start, .next = t_next, .stop = t_stop, .show = show_tty_driver }; static int tty_drivers_open(struct inode *inode, struct file *file) { return seq_open(file, &tty_drivers_op); } static const struct file_operations proc_tty_drivers_operations = { .open = tty_drivers_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * This function is called by tty_register_driver() to handle * registering the driver's /proc handler into /proc/tty/driver/<foo> */ void proc_tty_register_driver(struct tty_driver *driver) { struct proc_dir_entry *ent; if (!driver->driver_name || driver->proc_entry || !driver->ops->proc_fops) return; ent = proc_create_data(driver->driver_name, 0, proc_tty_driver, driver->ops->proc_fops, driver); driver->proc_entry = ent; } /* * This function is called by tty_unregister_driver() */ void proc_tty_unregister_driver(struct tty_driver *driver) { struct proc_dir_entry *ent; ent = driver->proc_entry; if (!ent) return; remove_proc_entry(driver->driver_name, 
proc_tty_driver); driver->proc_entry = NULL; } /* * Called by proc_root_init() to initialize the /proc/tty subtree */ void __init proc_tty_init(void) { if (!proc_mkdir("tty", NULL)) return; proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL); /* * /proc/tty/driver/serial reveals the exact character counts for * serial links which is just too easy to abuse for inferring * password lengths and inter-keystroke timings during password * entry. */ proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL); proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops); proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations); }
gpl-2.0
JerryScript/VaeVictus
drivers/infiniband/ulp/ipoib/ipoib_cm.c
2106
43416
/* * Copyright (c) 2006 Mellanox Technologies. All rights reserved * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_cm.h> #include <net/dst.h> #include <net/icmp.h> #include <linux/icmpv6.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "ipoib.h" int ipoib_max_conn_qp = 128; module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444); MODULE_PARM_DESC(max_nonsrq_conn_qp, "Max number of connected-mode QPs per interface " "(applied only if shared receive queue is not available)"); #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA static int data_debug_level; module_param_named(cm_data_debug_level, data_debug_level, int, 0644); MODULE_PARM_DESC(cm_data_debug_level, "Enable data path debug tracing for connected mode if > 0"); #endif #define IPOIB_CM_IETF_ID 0x1000000000000000ULL #define IPOIB_CM_RX_UPDATE_TIME (256 * HZ) #define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ) #define IPOIB_CM_RX_DELAY (3 * 256 * HZ) #define IPOIB_CM_RX_UPDATE_MASK (0x3) static struct ib_qp_attr ipoib_cm_err_attr = { .qp_state = IB_QPS_ERR }; #define IPOIB_CM_RX_DRAIN_WRID 0xffffffff static struct ib_send_wr ipoib_cm_rx_drain_wr = { .wr_id = IPOIB_CM_RX_DRAIN_WRID, .opcode = IB_WR_SEND, }; static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags, u64 mapping[IPOIB_CM_RX_SG]) { int i; ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); for (i = 0; i < frags; ++i) ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); } static int ipoib_cm_post_receive_srq(struct net_device *dev, int id) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_recv_wr *bad_wr; int i, ret; priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; for (i = 0; i < priv->cm.num_frags; ++i) priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr); if (unlikely(ret)) { ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret); ipoib_cm_dma_unmap_rx(priv, 
priv->cm.num_frags - 1, priv->cm.srq_ring[id].mapping); dev_kfree_skb_any(priv->cm.srq_ring[id].skb); priv->cm.srq_ring[id].skb = NULL; } return ret; } static int ipoib_cm_post_receive_nonsrq(struct net_device *dev, struct ipoib_cm_rx *rx, struct ib_recv_wr *wr, struct ib_sge *sge, int id) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_recv_wr *bad_wr; int i, ret; wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; for (i = 0; i < IPOIB_CM_RX_SG; ++i) sge[i].addr = rx->rx_ring[id].mapping[i]; ret = ib_post_recv(rx->qp, wr, &bad_wr); if (unlikely(ret)) { ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret); ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, rx->rx_ring[id].mapping); dev_kfree_skb_any(rx->rx_ring[id].skb); rx->rx_ring[id].skb = NULL; } return ret; } static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, struct ipoib_cm_rx_buf *rx_ring, int id, int frags, u64 mapping[IPOIB_CM_RX_SG]) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; int i; skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12); if (unlikely(!skb)) return NULL; /* * IPoIB adds a 4 byte header. So we need 12 more bytes to align the * IP header to a multiple of 16. 
*/ skb_reserve(skb, 12); mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { dev_kfree_skb_any(skb); return NULL; } for (i = 0; i < frags; i++) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) goto partial_error; skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) goto partial_error; } rx_ring[id].skb = skb; return skb; partial_error: ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); for (; i > 0; --i) ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); return NULL; } static void ipoib_cm_free_rx_ring(struct net_device *dev, struct ipoib_cm_rx_buf *rx_ring) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i; for (i = 0; i < ipoib_recvq_size; ++i) if (rx_ring[i].skb) { ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, rx_ring[i].mapping); dev_kfree_skb_any(rx_ring[i].skb); } vfree(rx_ring); } static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv) { struct ib_send_wr *bad_wr; struct ipoib_cm_rx *p; /* We only reserved 1 extra slot in CQ for drain WRs, so * make sure we have at most 1 outstanding WR. */ if (list_empty(&priv->cm.rx_flush_list) || !list_empty(&priv->cm.rx_drain_list)) return; /* * QPs on flush list are error state. This way, a "flush * error" WC will be immediately generated for each WR we post. 
*/ p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list); if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr)) ipoib_warn(priv, "failed to post drain wr\n"); list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list); } static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx) { struct ipoib_cm_rx *p = ctx; struct ipoib_dev_priv *priv = netdev_priv(p->dev); unsigned long flags; if (event->event != IB_EVENT_QP_LAST_WQE_REACHED) return; spin_lock_irqsave(&priv->lock, flags); list_move(&p->list, &priv->cm.rx_flush_list); p->state = IPOIB_CM_RX_FLUSH; ipoib_cm_start_rx_drain(priv); spin_unlock_irqrestore(&priv->lock, flags); } static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev, struct ipoib_cm_rx *p) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_init_attr attr = { .event_handler = ipoib_cm_rx_event_handler, .send_cq = priv->recv_cq, /* For drain WR */ .recv_cq = priv->recv_cq, .srq = priv->cm.srq, .cap.max_send_wr = 1, /* For drain WR */ .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */ .sq_sig_type = IB_SIGNAL_ALL_WR, .qp_type = IB_QPT_RC, .qp_context = p, }; if (!ipoib_cm_has_srq(dev)) { attr.cap.max_recv_wr = ipoib_recvq_size; attr.cap.max_recv_sge = IPOIB_CM_RX_SG; } return ib_create_qp(priv->pd, &attr); } static int ipoib_cm_modify_rx_qp(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, unsigned psn) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; int qp_attr_mask, ret; qp_attr.qp_state = IB_QPS_INIT; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret); return ret; } ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret); return ret; } qp_attr.qp_state = IB_QPS_RTR; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); return ret; 
} qp_attr.rq_psn = psn; ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); return ret; } /* * Current Mellanox HCA firmware won't generate completions * with error for drain WRs unless the QP has been moved to * RTS first. This work-around leaves a window where a QP has * moved to error asynchronously, but this will eventually get * fixed in firmware, so let's not error out if modify QP * fails. */ qp_attr.qp_state = IB_QPS_RTS; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); return 0; } ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); return 0; } return 0; } static void ipoib_cm_init_rx_wr(struct net_device *dev, struct ib_recv_wr *wr, struct ib_sge *sge) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i; for (i = 0; i < priv->cm.num_frags; ++i) sge[i].lkey = priv->mr->lkey; sge[0].length = IPOIB_CM_HEAD_SIZE; for (i = 1; i < priv->cm.num_frags; ++i) sge[i].length = PAGE_SIZE; wr->next = NULL; wr->sg_list = sge; wr->num_sge = priv->cm.num_frags; } static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct { struct ib_recv_wr wr; struct ib_sge sge[IPOIB_CM_RX_SG]; } *t; int ret; int i; rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring); if (!rx->rx_ring) { printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n", priv->ca->name, ipoib_recvq_size); return -ENOMEM; } t = kmalloc(sizeof *t, GFP_KERNEL); if (!t) { ret = -ENOMEM; goto err_free; } ipoib_cm_init_rx_wr(dev, &t->wr, t->sge); spin_lock_irq(&priv->lock); if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) { spin_unlock_irq(&priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0); ret = -EINVAL; goto err_free; } else ++priv->cm.nonsrq_conn_qp; 
spin_unlock_irq(&priv->lock); for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1, rx->rx_ring[i].mapping)) { ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); ret = -ENOMEM; goto err_count; } ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); if (ret) { ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq " "failed for buf %d\n", i); ret = -EIO; goto err_count; } } rx->recv_count = ipoib_recvq_size; kfree(t); return 0; err_count: spin_lock_irq(&priv->lock); --priv->cm.nonsrq_conn_qp; spin_unlock_irq(&priv->lock); err_free: kfree(t); ipoib_cm_free_rx_ring(dev, rx->rx_ring); return ret; } static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, struct ib_cm_req_event_param *req, unsigned psn) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_data data = {}; struct ib_cm_rep_param rep = {}; data.qpn = cpu_to_be32(priv->qp->qp_num); data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE); rep.private_data = &data; rep.private_data_len = sizeof data; rep.flow_control = 0; rep.rnr_retry_count = req->rnr_retry_count; rep.srq = ipoib_cm_has_srq(dev); rep.qp_num = qp->qp_num; rep.starting_psn = psn; return ib_send_cm_rep(cm_id, &rep); } static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) { struct net_device *dev = cm_id->context; struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_rx *p; unsigned psn; int ret; ipoib_dbg(priv, "REQ arrived\n"); p = kzalloc(sizeof *p, GFP_KERNEL); if (!p) return -ENOMEM; p->dev = dev; p->id = cm_id; cm_id->context = p; p->state = IPOIB_CM_RX_LIVE; p->jiffies = jiffies; INIT_LIST_HEAD(&p->list); p->qp = ipoib_cm_create_rx_qp(dev, p); if (IS_ERR(p->qp)) { ret = PTR_ERR(p->qp); goto err_qp; } psn = random32() & 0xffffff; ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn); if (ret) goto err_modify; if (!ipoib_cm_has_srq(dev)) { ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p); if (ret) goto 
err_modify; } spin_lock_irq(&priv->lock); queue_delayed_work(ipoib_workqueue, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); /* Add this entry to passive ids list head, but do not re-add it * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ p->jiffies = jiffies; if (p->state == IPOIB_CM_RX_LIVE) list_move(&p->list, &priv->cm.passive_ids); spin_unlock_irq(&priv->lock); ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn); if (ret) { ipoib_warn(priv, "failed to send REP: %d\n", ret); if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_warn(priv, "unable to move qp to error state\n"); } return 0; err_modify: ib_destroy_qp(p->qp); err_qp: kfree(p); return ret; } static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) { struct ipoib_cm_rx *p; struct ipoib_dev_priv *priv; switch (event->event) { case IB_CM_REQ_RECEIVED: return ipoib_cm_req_handler(cm_id, event); case IB_CM_DREQ_RECEIVED: p = cm_id->context; ib_send_cm_drep(cm_id, NULL, 0); /* Fall through */ case IB_CM_REJ_RECEIVED: p = cm_id->context; priv = netdev_priv(p->dev); if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_warn(priv, "unable to move qp to error state\n"); /* Fall through */ default: return 0; } } /* Adjust length of skb with fragments to match received data */ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, unsigned int length, struct sk_buff *toskb) { int i, num_frags; unsigned int size; /* put header into skb */ size = min(length, hdr_space); skb->tail += size; skb->len += size; length -= size; num_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < num_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (length == 0) { /* don't need this page */ skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE); --skb_shinfo(skb)->nr_frags; } else { size = min(length, (unsigned) PAGE_SIZE); frag->size = size; skb->data_len += size; skb->truesize += size; skb->len += size; length -= size; } } } 
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_rx_buf *rx_ring; unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV); struct sk_buff *skb, *newskb; struct ipoib_cm_rx *p; unsigned long flags; u64 mapping[IPOIB_CM_RX_SG]; int frags; int has_srq; struct sk_buff *small_skb; ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n", wr_id, wc->status); if (unlikely(wr_id >= ipoib_recvq_size)) { if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) { spin_lock_irqsave(&priv->lock, flags); list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); ipoib_cm_start_rx_drain(priv); queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); spin_unlock_irqrestore(&priv->lock, flags); } else ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", wr_id, ipoib_recvq_size); return; } p = wc->qp->qp_context; has_srq = ipoib_cm_has_srq(dev); rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring; skb = rx_ring[wr_id].skb; if (unlikely(wc->status != IB_WC_SUCCESS)) { ipoib_dbg(priv, "cm recv error " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); ++dev->stats.rx_dropped; if (has_srq) goto repost; else { if (!--p->recv_count) { spin_lock_irqsave(&priv->lock, flags); list_move(&p->list, &priv->cm.rx_reap_list); spin_unlock_irqrestore(&priv->lock, flags); queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); } return; } } if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) { if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) { spin_lock_irqsave(&priv->lock, flags); p->jiffies = jiffies; /* Move this entry to list head, but do not re-add it * if it has been moved out of list. 
*/ if (p->state == IPOIB_CM_RX_LIVE) list_move(&p->list, &priv->cm.passive_ids); spin_unlock_irqrestore(&priv->lock, flags); } } if (wc->byte_len < IPOIB_CM_COPYBREAK) { int dlen = wc->byte_len; small_skb = dev_alloc_skb(dlen + 12); if (small_skb) { skb_reserve(small_skb, 12); ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], dlen, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, small_skb->data, dlen); ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0], dlen, DMA_FROM_DEVICE); skb_put(small_skb, dlen); skb = small_skb; goto copied; } } frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len, (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE; newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping); if (unlikely(!newskb)) { /* * If we can't allocate a new RX buffer, dump * this packet and reuse the old buffer. */ ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id); ++dev->stats.rx_dropped; goto repost; } ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping); memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping); ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", wc->byte_len, wc->slid); skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); copied: skb->protocol = ((struct ipoib_header *) skb->data)->proto; skb_reset_mac_header(skb); skb_pull(skb, IPOIB_ENCAP_LEN); ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; skb->dev = dev; /* XXX get correct PACKET_ type here */ skb->pkt_type = PACKET_HOST; netif_receive_skb(skb); repost: if (has_srq) { if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id))) ipoib_warn(priv, "ipoib_cm_post_receive_srq failed " "for buf %d\n", wr_id); } else { if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p, &priv->cm.rx_wr, priv->cm.rx_sge, wr_id))) { --p->recv_count; ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed " "for buf %d\n", wr_id); } } } static inline int post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, u64 addr, 
int len) { struct ib_send_wr *bad_wr; priv->tx_sge[0].addr = addr; priv->tx_sge[0].length = len; priv->tx_wr.num_sge = 1; priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); } void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_tx_buf *tx_req; u64 addr; int rc; if (unlikely(skb->len > tx->mtu)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", skb->len, tx->mtu); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN); return; } ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n", tx->tx_head, skb->len, tx->qp->qp_num); /* * We put the skb into the tx_ring _before_ we call post_send() * because it's entirely possible that the completion handler will * run before we execute anything after the post_send(). That * means we have to make sure everything is properly recorded and * our state is consistent before we call post_send(). 
*/ tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } tx_req->mapping = addr; rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), addr, skb->len); if (unlikely(rc)) { ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); } else { dev->trans_start = jiffies; ++tx->tx_head; if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) ipoib_warn(priv, "request notify on send CQ failed\n"); netif_stop_queue(dev); } } } void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_tx *tx = wc->qp->qp_context; unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; struct ipoib_cm_tx_buf *tx_req; unsigned long flags; ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", wr_id, wc->status); if (unlikely(wr_id >= ipoib_sendq_size)) { ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", wr_id, ipoib_sendq_size); return; } tx_req = &tx->tx_ring[wr_id]; ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); /* FIXME: is this right? Shouldn't we only increment on success? 
*/ ++dev->stats.tx_packets; dev->stats.tx_bytes += tx_req->skb->len; dev_kfree_skb_any(tx_req->skb); netif_tx_lock(dev); ++tx->tx_tail; if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && netif_queue_stopped(dev) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) netif_wake_queue(dev); if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) { struct ipoib_neigh *neigh; ipoib_dbg(priv, "failed cm send event " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); spin_lock_irqsave(&priv->lock, flags); neigh = tx->neigh; if (neigh) { neigh->cm = NULL; list_del(&neigh->list); if (neigh->ah) ipoib_put_ah(neigh->ah); ipoib_neigh_free(dev, neigh); tx->neigh = NULL; } if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); queue_work(ipoib_workqueue, &priv->cm.reap_task); } clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); spin_unlock_irqrestore(&priv->lock, flags); } netif_tx_unlock(dev); } int ipoib_cm_dev_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) return 0; priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev); if (IS_ERR(priv->cm.id)) { printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name); ret = PTR_ERR(priv->cm.id); goto err_cm; } ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), 0, NULL); if (ret) { printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name, IPOIB_CM_IETF_ID | priv->qp->qp_num); goto err_listen; } return 0; err_listen: ib_destroy_cm_id(priv->cm.id); err_cm: priv->cm.id = NULL; return ret; } static void ipoib_cm_free_rx_reap_list(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_rx *rx, *n; LIST_HEAD(list); spin_lock_irq(&priv->lock); list_splice_init(&priv->cm.rx_reap_list, &list); spin_unlock_irq(&priv->lock); list_for_each_entry_safe(rx, n, &list, list) { 
ib_destroy_cm_id(rx->id); ib_destroy_qp(rx->qp); if (!ipoib_cm_has_srq(dev)) { ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring); spin_lock_irq(&priv->lock); --priv->cm.nonsrq_conn_qp; spin_unlock_irq(&priv->lock); } kfree(rx); } } void ipoib_cm_dev_stop(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_rx *p; unsigned long begin; int ret; if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) return; ib_destroy_cm_id(priv->cm.id); priv->cm.id = NULL; spin_lock_irq(&priv->lock); while (!list_empty(&priv->cm.passive_ids)) { p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); list_move(&p->list, &priv->cm.rx_error_list); p->state = IPOIB_CM_RX_ERROR; spin_unlock_irq(&priv->lock); ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); if (ret) ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); spin_lock_irq(&priv->lock); } /* Wait for all RX to be drained */ begin = jiffies; while (!list_empty(&priv->cm.rx_error_list) || !list_empty(&priv->cm.rx_flush_list) || !list_empty(&priv->cm.rx_drain_list)) { if (time_after(jiffies, begin + 5 * HZ)) { ipoib_warn(priv, "RX drain timing out\n"); /* * assume the HW is wedged and just free up everything. 
*/ list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_reap_list); list_splice_init(&priv->cm.rx_error_list, &priv->cm.rx_reap_list); list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); break; } spin_unlock_irq(&priv->lock); msleep(1); ipoib_drain_cq(dev); spin_lock_irq(&priv->lock); } spin_unlock_irq(&priv->lock); ipoib_cm_free_rx_reap_list(dev); cancel_delayed_work(&priv->cm.stale_task); } static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) { struct ipoib_cm_tx *p = cm_id->context; struct ipoib_dev_priv *priv = netdev_priv(p->dev); struct ipoib_cm_data *data = event->private_data; struct sk_buff_head skqueue; struct ib_qp_attr qp_attr; int qp_attr_mask, ret; struct sk_buff *skb; p->mtu = be32_to_cpu(data->mtu); if (p->mtu <= IPOIB_ENCAP_LEN) { ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n", p->mtu, IPOIB_ENCAP_LEN); return -EINVAL; } qp_attr.qp_state = IB_QPS_RTR; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); return ret; } qp_attr.rq_psn = 0 /* FIXME */; ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); return ret; } qp_attr.qp_state = IB_QPS_RTS; ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); return ret; } ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); return ret; } skb_queue_head_init(&skqueue); spin_lock_irq(&priv->lock); set_bit(IPOIB_FLAG_OPER_UP, &p->flags); if (p->neigh) while ((skb = __skb_dequeue(&p->neigh->queue))) __skb_queue_tail(&skqueue, skb); spin_unlock_irq(&priv->lock); while ((skb = __skb_dequeue(&skqueue))) { skb->dev = p->dev; if (dev_queue_xmit(skb)) ipoib_warn(priv, "dev_queue_xmit failed " "to requeue packet\n"); } ret = ib_send_cm_rtu(cm_id, NULL, 0); if (ret) { 
ipoib_warn(priv, "failed to send RTU: %d\n", ret); return ret; } return 0; } static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_init_attr attr = { .send_cq = priv->recv_cq, .recv_cq = priv->recv_cq, .srq = priv->cm.srq, .cap.max_send_wr = ipoib_sendq_size, .cap.max_send_sge = 1, .sq_sig_type = IB_SIGNAL_ALL_WR, .qp_type = IB_QPT_RC, .qp_context = tx }; return ib_create_qp(priv->pd, &attr); } static int ipoib_cm_send_req(struct net_device *dev, struct ib_cm_id *id, struct ib_qp *qp, u32 qpn, struct ib_sa_path_rec *pathrec) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_data data = {}; struct ib_cm_req_param req = {}; data.qpn = cpu_to_be32(priv->qp->qp_num); data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE); req.primary_path = pathrec; req.alternate_path = NULL; req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn); req.qp_num = qp->qp_num; req.qp_type = qp->qp_type; req.private_data = &data; req.private_data_len = sizeof data; req.flow_control = 0; req.starting_psn = 0; /* FIXME */ /* * Pick some arbitrary defaults here; we could make these * module parameters if anyone cared about setting them. 
*/ req.responder_resources = 4; req.remote_cm_response_timeout = 20; req.local_cm_response_timeout = 20; req.retry_count = 0; /* RFC draft warns against retries */ req.rnr_retry_count = 0; /* RFC draft warns against retries */ req.max_cm_retries = 15; req.srq = ipoib_cm_has_srq(dev); return ib_send_cm_req(id, &req); } static int ipoib_cm_modify_tx_init(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; int qp_attr_mask, ret; ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index); if (ret) { ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret); return ret; } qp_attr.qp_state = IB_QPS_INIT; qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE; qp_attr.port_num = priv->port; qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) { ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret); return ret; } return 0; } static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, struct ib_sa_path_rec *pathrec) { struct ipoib_dev_priv *priv = netdev_priv(p->dev); int ret; p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring); if (!p->tx_ring) { ipoib_warn(priv, "failed to allocate tx ring\n"); ret = -ENOMEM; goto err_tx; } p->qp = ipoib_cm_create_tx_qp(p->dev, p); if (IS_ERR(p->qp)) { ret = PTR_ERR(p->qp); ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); goto err_qp; } p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p); if (IS_ERR(p->id)) { ret = PTR_ERR(p->id); ipoib_warn(priv, "failed to create tx cm id: %d\n", ret); goto err_id; } ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp); if (ret) { ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); goto err_modify; } ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); if (ret) { ipoib_warn(priv, "failed to send cm req: %d\n", ret); goto err_send_cm; } ipoib_dbg(priv, "Request connection 
0x%x for gid %pI6 qpn 0x%x\n", p->qp->qp_num, pathrec->dgid.raw, qpn); return 0; err_send_cm: err_modify: ib_destroy_cm_id(p->id); err_id: p->id = NULL; ib_destroy_qp(p->qp); err_qp: p->qp = NULL; vfree(p->tx_ring); err_tx: return ret; } static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) { struct ipoib_dev_priv *priv = netdev_priv(p->dev); struct ipoib_cm_tx_buf *tx_req; unsigned long begin; ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail); if (p->id) ib_destroy_cm_id(p->id); if (p->tx_ring) { /* Wait for all sends to complete */ begin = jiffies; while ((int) p->tx_tail - (int) p->tx_head < 0) { if (time_after(jiffies, begin + 5 * HZ)) { ipoib_warn(priv, "timing out; %d sends not completed\n", p->tx_head - p->tx_tail); goto timeout; } msleep(1); } } timeout: while ((int) p->tx_tail - (int) p->tx_head < 0) { tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(tx_req->skb); ++p->tx_tail; netif_tx_lock_bh(p->dev); if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && netif_queue_stopped(p->dev) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) netif_wake_queue(p->dev); netif_tx_unlock_bh(p->dev); } if (p->qp) ib_destroy_qp(p->qp); vfree(p->tx_ring); kfree(p); } static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) { struct ipoib_cm_tx *tx = cm_id->context; struct ipoib_dev_priv *priv = netdev_priv(tx->dev); struct net_device *dev = priv->dev; struct ipoib_neigh *neigh; unsigned long flags; int ret; switch (event->event) { case IB_CM_DREQ_RECEIVED: ipoib_dbg(priv, "DREQ received.\n"); ib_send_cm_drep(cm_id, NULL, 0); break; case IB_CM_REP_RECEIVED: ipoib_dbg(priv, "REP received.\n"); ret = ipoib_cm_rep_handler(cm_id, event); if (ret) ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); break; case IB_CM_REQ_ERROR: case 
IB_CM_REJ_RECEIVED: case IB_CM_TIMEWAIT_EXIT: ipoib_dbg(priv, "CM error %d.\n", event->event); netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); neigh = tx->neigh; if (neigh) { neigh->cm = NULL; list_del(&neigh->list); if (neigh->ah) ipoib_put_ah(neigh->ah); ipoib_neigh_free(dev, neigh); tx->neigh = NULL; } if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); queue_work(ipoib_workqueue, &priv->cm.reap_task); } spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); break; default: break; } return 0; } struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path, struct ipoib_neigh *neigh) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_tx *tx; tx = kzalloc(sizeof *tx, GFP_ATOMIC); if (!tx) return NULL; neigh->cm = tx; tx->neigh = neigh; tx->path = path; tx->dev = dev; list_add(&tx->list, &priv->cm.start_list); set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); queue_work(ipoib_workqueue, &priv->cm.start_task); return tx; } void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(tx->dev); if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); queue_work(ipoib_workqueue, &priv->cm.reap_task); ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->dgid.raw); tx->neigh = NULL; } } static void ipoib_cm_tx_start(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, cm.start_task); struct net_device *dev = priv->dev; struct ipoib_neigh *neigh; struct ipoib_cm_tx *p; unsigned long flags; int ret; struct ib_sa_path_rec pathrec; u32 qpn; netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); while (!list_empty(&priv->cm.start_list)) { p = list_entry(priv->cm.start_list.next, typeof(*p), list); list_del_init(&p->list); neigh = p->neigh; qpn = IPOIB_QPN(neigh->neighbour->ha); memcpy(&pathrec, &p->path->pathrec, sizeof 
pathrec); spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); ret = ipoib_cm_tx_init(p, qpn, &pathrec); netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); if (ret) { neigh = p->neigh; if (neigh) { neigh->cm = NULL; list_del(&neigh->list); if (neigh->ah) ipoib_put_ah(neigh->ah); ipoib_neigh_free(dev, neigh); } list_del(&p->list); kfree(p); } } spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); } static void ipoib_cm_tx_reap(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, cm.reap_task); struct net_device *dev = priv->dev; struct ipoib_cm_tx *p; unsigned long flags; netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); while (!list_empty(&priv->cm.reap_list)) { p = list_entry(priv->cm.reap_list.next, typeof(*p), list); list_del(&p->list); spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); ipoib_cm_tx_destroy(p); netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); } spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); } static void ipoib_cm_skb_reap(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, cm.skb_task); struct net_device *dev = priv->dev; struct sk_buff *skb; unsigned long flags; unsigned mtu = priv->mcast_mtu; netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); while ((skb = skb_dequeue(&priv->cm.skb_queue))) { spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); if (skb->protocol == htons(ETH_P_IP)) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (skb->protocol == htons(ETH_P_IPV6)) icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); #endif dev_kfree_skb_any(skb); netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); } spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); } void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff 
*skb, unsigned int mtu) { struct ipoib_dev_priv *priv = netdev_priv(dev); int e = skb_queue_empty(&priv->cm.skb_queue); if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); skb_queue_tail(&priv->cm.skb_queue, skb); if (e) queue_work(ipoib_workqueue, &priv->cm.skb_task); } static void ipoib_cm_rx_reap(struct work_struct *work) { ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv, cm.rx_reap_task)->dev); } static void ipoib_cm_stale_task(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, cm.stale_task.work); struct ipoib_cm_rx *p; int ret; spin_lock_irq(&priv->lock); while (!list_empty(&priv->cm.passive_ids)) { /* List is sorted by LRU, start from tail, * stop when we see a recently used entry */ p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list); if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT)) break; list_move(&p->list, &priv->cm.rx_error_list); p->state = IPOIB_CM_RX_ERROR; spin_unlock_irq(&priv->lock); ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); if (ret) ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); spin_lock_irq(&priv->lock); } if (!list_empty(&priv->cm.passive_ids)) queue_delayed_work(ipoib_workqueue, &priv->cm.stale_task, IPOIB_CM_RX_DELAY); spin_unlock_irq(&priv->lock); } static ssize_t show_mode(struct device *d, struct device_attribute *attr, char *buf) { struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d)); if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) return sprintf(buf, "connected\n"); else return sprintf(buf, "datagram\n"); } static ssize_t set_mode(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *dev = to_net_dev(d); struct ipoib_dev_priv *priv = netdev_priv(dev); if (!rtnl_trylock()) return restart_syscall(); /* flush paths if we switch modes so that connections are restarted */ if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); ipoib_warn(priv, "enabling connected mode " "will cause multicast packet drops\n"); netdev_update_features(dev); rtnl_unlock(); priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; ipoib_flush_paths(dev); return count; } if (!strcmp(buf, "datagram\n")) { clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); netdev_update_features(dev); dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); rtnl_unlock(); ipoib_flush_paths(dev); return count; } rtnl_unlock(); return -EINVAL; } static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); int ipoib_cm_add_mode_attr(struct net_device *dev) { return device_create_file(&dev->dev, &dev_attr_mode); } static void ipoib_cm_create_srq(struct net_device *dev, int max_sge) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_srq_init_attr srq_init_attr = { .attr = { .max_wr = ipoib_recvq_size, .max_sge = max_sge } }; priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr); if (IS_ERR(priv->cm.srq)) { if (PTR_ERR(priv->cm.srq) != -ENOSYS) printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n", priv->ca->name, PTR_ERR(priv->cm.srq)); priv->cm.srq = NULL; return; } priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring); if (!priv->cm.srq_ring) { printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n", priv->ca->name, ipoib_recvq_size); ib_destroy_srq(priv->cm.srq); priv->cm.srq = NULL; return; } } int ipoib_cm_dev_init(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i, ret; struct ib_device_attr attr; INIT_LIST_HEAD(&priv->cm.passive_ids); INIT_LIST_HEAD(&priv->cm.reap_list); INIT_LIST_HEAD(&priv->cm.start_list); INIT_LIST_HEAD(&priv->cm.rx_error_list); INIT_LIST_HEAD(&priv->cm.rx_flush_list); INIT_LIST_HEAD(&priv->cm.rx_drain_list); INIT_LIST_HEAD(&priv->cm.rx_reap_list); INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start); INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap); INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap); 
INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap); INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task); skb_queue_head_init(&priv->cm.skb_queue); ret = ib_query_device(priv->ca, &attr); if (ret) { printk(KERN_WARNING "ib_query_device() failed with %d\n", ret); return ret; } ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge); attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge); ipoib_cm_create_srq(dev, attr.max_srq_sge); if (ipoib_cm_has_srq(dev)) { priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10; priv->cm.num_frags = attr.max_srq_sge; ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n", priv->cm.max_cm_mtu, priv->cm.num_frags); } else { priv->cm.max_cm_mtu = IPOIB_CM_MTU; priv->cm.num_frags = IPOIB_CM_RX_SG; } ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge); if (ipoib_cm_has_srq(dev)) { for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, priv->cm.num_frags - 1, priv->cm.srq_ring[i].mapping)) { ipoib_warn(priv, "failed to allocate " "receive buffer %d\n", i); ipoib_cm_dev_cleanup(dev); return -ENOMEM; } if (ipoib_cm_post_receive_srq(dev, i)) { ipoib_warn(priv, "ipoib_cm_post_receive_srq " "failed for buf %d\n", i); ipoib_cm_dev_cleanup(dev); return -EIO; } } } priv->dev->dev_addr[0] = IPOIB_FLAGS_RC; return 0; } void ipoib_cm_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; if (!priv->cm.srq) return; ipoib_dbg(priv, "Cleanup ipoib connected mode.\n"); ret = ib_destroy_srq(priv->cm.srq); if (ret) ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret); priv->cm.srq = NULL; if (!priv->cm.srq_ring) return; ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring); priv->cm.srq_ring = NULL; }
gpl-2.0
batman38102/android_kernel_samsung_mint
arch/arm/plat-mxc/devices/platform-imx-fb.c
2362
1645
/* * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_imx_fb_data_entry_single(soc, _size) \ { \ .iobase = soc ## _LCDC_BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_LCDC, \ } #ifdef CONFIG_SOC_IMX1 const struct imx_imx_fb_data imx1_imx_fb_data __initconst = imx_imx_fb_data_entry_single(MX1, SZ_4K); #endif /* ifdef CONFIG_SOC_IMX1 */ #ifdef CONFIG_SOC_IMX21 const struct imx_imx_fb_data imx21_imx_fb_data __initconst = imx_imx_fb_data_entry_single(MX21, SZ_4K); #endif /* ifdef CONFIG_SOC_IMX21 */ #ifdef CONFIG_SOC_IMX25 const struct imx_imx_fb_data imx25_imx_fb_data __initconst = imx_imx_fb_data_entry_single(MX25, SZ_16K); #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_imx_fb_data imx27_imx_fb_data __initconst = imx_imx_fb_data_entry_single(MX27, SZ_4K); #endif /* ifdef CONFIG_SOC_IMX27 */ struct platform_device *__init imx_add_imx_fb( const struct imx_imx_fb_data *data, const struct imx_fb_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device_dmamask("imx-fb", 0, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); }
gpl-2.0
CyanogenMod/android_kernel_asus_tf201
drivers/staging/comedi/drivers/amplc_dio200.c
3386
40903
/* comedi/drivers/amplc_dio200.c Driver for Amplicon PC272E and PCI272 DIO boards. (Support for other boards in Amplicon 200 series may be added at a later date, e.g. PCI215.) Copyright (C) 2005 MEV Ltd. <http://www.mev.co.uk/> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: amplc_dio200 Description: Amplicon 200 Series Digital I/O Author: Ian Abbott <abbotti@mev.co.uk> Devices: [Amplicon] PC212E (pc212e), PC214E (pc214e), PC215E (pc215e), PCI215 (pci215 or amplc_dio200), PC218E (pc218e), PC272E (pc272e), PCI272 (pci272 or amplc_dio200) Updated: Wed, 22 Oct 2008 13:36:02 +0100 Status: works Configuration options - PC212E, PC214E, PC215E, PC218E, PC272E: [0] - I/O port base address [1] - IRQ (optional, but commands won't work without it) Configuration options - PCI215, PCI272: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first available PCI device will be used. Passing a zero for an option is the same as leaving it unspecified. 
SUBDEVICES PC218E PC212E PC215E/PCI215 ------------- ------------- ------------- Subdevices 7 6 5 0 CTR-X1 PPI-X PPI-X 1 CTR-X2 CTR-Y1 PPI-Y 2 CTR-Y1 CTR-Y2 CTR-Z1 3 CTR-Y2 CTR-Z1 CTR-Z2 4 CTR-Z1 CTR-Z2 INTERRUPT 5 CTR-Z2 INTERRUPT 6 INTERRUPT PC214E PC272E/PCI272 ------------- ------------- Subdevices 4 4 0 PPI-X PPI-X 1 PPI-Y PPI-Y 2 CTR-Z1* PPI-Z 3 INTERRUPT* INTERRUPT Each PPI is a 8255 chip providing 24 DIO channels. The DIO channels are configurable as inputs or outputs in four groups: Port A - channels 0 to 7 Port B - channels 8 to 15 Port CL - channels 16 to 19 Port CH - channels 20 to 23 Only mode 0 of the 8255 chips is supported. Each CTR is a 8254 chip providing 3 16-bit counter channels. Each channel is configured individually with INSN_CONFIG instructions. The specific type of configuration instruction is specified in data[0]. Some configuration instructions expect an additional parameter in data[1]; others return a value in data[1]. The following configuration instructions are supported: INSN_CONFIG_SET_COUNTER_MODE. Sets the counter channel's mode and BCD/binary setting specified in data[1]. INSN_CONFIG_8254_READ_STATUS. Reads the status register value for the counter channel into data[1]. INSN_CONFIG_SET_CLOCK_SRC. Sets the counter channel's clock source as specified in data[1] (this is a hardware-specific value). Not supported on PC214E. For the other boards, valid clock sources are 0 to 7 as follows: 0. CLK n, the counter channel's dedicated CLK input from the SK1 connector. (N.B. for other values, the counter channel's CLKn pin on the SK1 connector is an output!) 1. Internal 10 MHz clock. 2. Internal 1 MHz clock. 3. Internal 100 kHz clock. 4. Internal 10 kHz clock. 5. Internal 1 kHz clock. 6. OUT n-1, the output of counter channel n-1 (see note 1 below). 7. Ext Clock, the counter chip's dedicated Ext Clock input from the SK1 connector. This pin is shared by all three counter channels on the chip. INSN_CONFIG_GET_CLOCK_SRC. 
Returns the counter channel's current clock source in data[1]. For internal clock sources, data[2] is set to the period in ns. INSN_CONFIG_SET_GATE_SRC. Sets the counter channel's gate source as specified in data[2] (this is a hardware-specific value). Not supported on PC214E. For the other boards, valid gate sources are 0 to 7 as follows: 0. VCC (internal +5V d.c.), i.e. gate permanently enabled. 1. GND (internal 0V d.c.), i.e. gate permanently disabled. 2. GAT n, the counter channel's dedicated GAT input from the SK1 connector. (N.B. for other values, the counter channel's GATn pin on the SK1 connector is an output!) 3. /OUT n-2, the inverted output of counter channel n-2 (see note 2 below). 4. Reserved. 5. Reserved. 6. Reserved. 7. Reserved. INSN_CONFIG_GET_GATE_SRC. Returns the counter channel's current gate source in data[2]. Clock and gate interconnection notes: 1. Clock source OUT n-1 is the output of the preceding channel on the same counter subdevice if n > 0, or the output of channel 2 on the preceding counter subdevice (see note 3) if n = 0. 2. Gate source /OUT n-2 is the inverted output of channel 0 on the same counter subdevice if n = 2, or the inverted output of channel n+1 on the preceding counter subdevice (see note 3) if n < 2. 3. The counter subdevices are connected in a ring, so the highest counter subdevice precedes the lowest. The 'INTERRUPT' subdevice pretends to be a digital input subdevice. The digital inputs come from the interrupt status register. The number of channels matches the number of interrupt sources. The PC214E does not have an interrupt status register; see notes on 'INTERRUPT SOURCES' below. 
INTERRUPT SOURCES PC218E PC212E PC215E/PCI215 ------------- ------------- ------------- Sources 6 6 6 0 CTR-X1-OUT PPI-X-C0 PPI-X-C0 1 CTR-X2-OUT PPI-X-C3 PPI-X-C3 2 CTR-Y1-OUT CTR-Y1-OUT PPI-Y-C0 3 CTR-Y2-OUT CTR-Y2-OUT PPI-Y-C3 4 CTR-Z1-OUT CTR-Z1-OUT CTR-Z1-OUT 5 CTR-Z2-OUT CTR-Z2-OUT CTR-Z2-OUT PC214E PC272E/PCI272 ------------- ------------- Sources 1 6 0 JUMPER-J5 PPI-X-C0 1 PPI-X-C3 2 PPI-Y-C0 3 PPI-Y-C3 4 PPI-Z-C0 5 PPI-Z-C3 When an interrupt source is enabled in the interrupt source enable register, a rising edge on the source signal latches the corresponding bit to 1 in the interrupt status register. When the interrupt status register value as a whole (actually, just the 6 least significant bits) goes from zero to non-zero, the board will generate an interrupt. For level-triggered hardware interrupts (PCI card), the interrupt will remain asserted until the interrupt status register is cleared to zero. For edge-triggered hardware interrupts (ISA card), no further interrupts will occur until the interrupt status register is cleared to zero. To clear a bit to zero in the interrupt status register, the corresponding interrupt source must be disabled in the interrupt source enable register (there is no separate interrupt clear register). The PC214E does not have an interrupt source enable register or an interrupt status register; its 'INTERRUPT' subdevice has a single channel and its interrupt source is selected by the position of jumper J5. COMMANDS The driver supports a read streaming acquisition command on the 'INTERRUPT' subdevice. The channel list selects the interrupt sources to be enabled. All channels will be sampled together (convert_src == TRIG_NOW). The scan begins a short time after the hardware interrupt occurs, subject to interrupt latencies (scan_begin_src == TRIG_EXT, scan_begin_arg == 0). The value read from the interrupt status register is packed into a short value, one bit per requested channel, in the order they appear in the channel list. 
*/ #include <linux/interrupt.h> #include <linux/slab.h> #include "../comedidev.h" #include "comedi_pci.h" #include "8255.h" #include "8253.h" #define DIO200_DRIVER_NAME "amplc_dio200" /* PCI IDs */ #define PCI_VENDOR_ID_AMPLICON 0x14dc #define PCI_DEVICE_ID_AMPLICON_PCI272 0x000a #define PCI_DEVICE_ID_AMPLICON_PCI215 0x000b #define PCI_DEVICE_ID_INVALID 0xffff /* 200 series registers */ #define DIO200_IO_SIZE 0x20 #define DIO200_XCLK_SCE 0x18 /* Group X clock selection register */ #define DIO200_YCLK_SCE 0x19 /* Group Y clock selection register */ #define DIO200_ZCLK_SCE 0x1a /* Group Z clock selection register */ #define DIO200_XGAT_SCE 0x1b /* Group X gate selection register */ #define DIO200_YGAT_SCE 0x1c /* Group Y gate selection register */ #define DIO200_ZGAT_SCE 0x1d /* Group Z gate selection register */ #define DIO200_INT_SCE 0x1e /* Interrupt enable/status register */ /* * Macros for constructing value for DIO_200_?CLK_SCE and * DIO_200_?GAT_SCE registers: * * 'which' is: 0 for CTR-X1, CTR-Y1, CTR-Z1; 1 for CTR-X2, CTR-Y2 or CTR-Z2. * 'chan' is the channel: 0, 1 or 2. * 'source' is the signal source: 0 to 7. */ #define CLK_SCE(which, chan, source) (((which) << 5) | ((chan) << 3) | (source)) #define GAT_SCE(which, chan, source) (((which) << 5) | ((chan) << 3) | (source)) /* * Periods of the internal clock sources in nanoseconds. */ static const unsigned clock_period[8] = { 0, /* dedicated clock input/output pin */ 100, /* 10 MHz */ 1000, /* 1 MHz */ 10000, /* 100 kHz */ 100000, /* 10 kHz */ 1000000, /* 1 kHz */ 0, /* OUT N-1 */ 0 /* group clock input pin */ }; /* * Board descriptions. 
*/ enum dio200_bustype { isa_bustype, pci_bustype }; enum dio200_model { pc212e_model, pc214e_model, pc215e_model, pci215_model, pc218e_model, pc272e_model, pci272_model, anypci_model }; enum dio200_layout { pc212_layout, pc214_layout, pc215_layout, pc218_layout, pc272_layout }; struct dio200_board { const char *name; unsigned short devid; enum dio200_bustype bustype; enum dio200_model model; enum dio200_layout layout; }; static const struct dio200_board dio200_boards[] = { { .name = "pc212e", .bustype = isa_bustype, .model = pc212e_model, .layout = pc212_layout, }, { .name = "pc214e", .bustype = isa_bustype, .model = pc214e_model, .layout = pc214_layout, }, { .name = "pc215e", .bustype = isa_bustype, .model = pc215e_model, .layout = pc215_layout, }, #ifdef CONFIG_COMEDI_PCI { .name = "pci215", .devid = PCI_DEVICE_ID_AMPLICON_PCI215, .bustype = pci_bustype, .model = pci215_model, .layout = pc215_layout, }, #endif { .name = "pc218e", .bustype = isa_bustype, .model = pc218e_model, .layout = pc218_layout, }, { .name = "pc272e", .bustype = isa_bustype, .model = pc272e_model, .layout = pc272_layout, }, #ifdef CONFIG_COMEDI_PCI { .name = "pci272", .devid = PCI_DEVICE_ID_AMPLICON_PCI272, .bustype = pci_bustype, .model = pci272_model, .layout = pc272_layout, }, #endif #ifdef CONFIG_COMEDI_PCI { .name = DIO200_DRIVER_NAME, .devid = PCI_DEVICE_ID_INVALID, .bustype = pci_bustype, .model = anypci_model, /* wildcard */ }, #endif }; /* * Layout descriptions - some ISA and PCI board descriptions share the same * layout. 
*/ enum dio200_sdtype { sd_none, sd_intr, sd_8255, sd_8254 }; #define DIO200_MAX_SUBDEVS 7 #define DIO200_MAX_ISNS 6 struct dio200_layout_struct { unsigned short n_subdevs; /* number of subdevices */ unsigned char sdtype[DIO200_MAX_SUBDEVS]; /* enum dio200_sdtype */ unsigned char sdinfo[DIO200_MAX_SUBDEVS]; /* depends on sdtype */ char has_int_sce; /* has interrupt enable/status register */ char has_clk_gat_sce; /* has clock/gate selection registers */ }; static const struct dio200_layout_struct dio200_layouts[] = { [pc212_layout] = { .n_subdevs = 6, .sdtype = {sd_8255, sd_8254, sd_8254, sd_8254, sd_8254, sd_intr}, .sdinfo = {0x00, 0x08, 0x0C, 0x10, 0x14, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 1, }, [pc214_layout] = { .n_subdevs = 4, .sdtype = {sd_8255, sd_8255, sd_8254, sd_intr}, .sdinfo = {0x00, 0x08, 0x10, 0x01}, .has_int_sce = 0, .has_clk_gat_sce = 0, }, [pc215_layout] = { .n_subdevs = 5, .sdtype = {sd_8255, sd_8255, sd_8254, sd_8254, sd_intr}, .sdinfo = {0x00, 0x08, 0x10, 0x14, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 1, }, [pc218_layout] = { .n_subdevs = 7, .sdtype = {sd_8254, sd_8254, sd_8255, sd_8254, sd_8254, sd_intr}, .sdinfo = {0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 1, }, [pc272_layout] = { .n_subdevs = 4, .sdtype = {sd_8255, sd_8255, sd_8255, sd_intr}, .sdinfo = {0x00, 0x08, 0x10, 0x3F}, .has_int_sce = 1, .has_clk_gat_sce = 0, }, }; /* * PCI driver table. 
*/ #ifdef CONFIG_COMEDI_PCI static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = { { PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, dio200_pci_table); #endif /* CONFIG_COMEDI_PCI */ /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct dio200_board *)dev->board_ptr) #define thislayout (&dio200_layouts[((struct dio200_board *) \ dev->board_ptr)->layout]) /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct dio200_private { #ifdef CONFIG_COMEDI_PCI struct pci_dev *pci_dev; /* PCI device */ #endif int intr_sd; }; #define devpriv ((struct dio200_private *)dev->private) struct dio200_subdev_8254 { unsigned long iobase; /* Counter base address */ unsigned long clk_sce_iobase; /* CLK_SCE base address */ unsigned long gat_sce_iobase; /* GAT_SCE base address */ int which; /* Bit 5 of CLK_SCE or GAT_SCE */ int has_clk_gat_sce; unsigned clock_src[3]; /* Current clock sources */ unsigned gate_src[3]; /* Current gate sources */ spinlock_t spinlock; }; struct dio200_subdev_intr { unsigned long iobase; spinlock_t spinlock; int active; int has_int_sce; unsigned int valid_isns; unsigned int enabled_isns; unsigned int stopcount; int continuous; }; /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. 
*/ static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int dio200_detach(struct comedi_device *dev); static struct comedi_driver driver_amplc_dio200 = { .driver_name = DIO200_DRIVER_NAME, .module = THIS_MODULE, .attach = dio200_attach, .detach = dio200_detach, .board_name = &dio200_boards[0].name, .offset = sizeof(struct dio200_board), .num_names = ARRAY_SIZE(dio200_boards), }; #ifdef CONFIG_COMEDI_PCI static int __devinit driver_amplc_dio200_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_amplc_dio200.driver_name); } static void __devexit driver_amplc_dio200_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_amplc_dio200_pci_driver = { .id_table = dio200_pci_table, .probe = &driver_amplc_dio200_pci_probe, .remove = __devexit_p(&driver_amplc_dio200_pci_remove) }; static int __init driver_amplc_dio200_init_module(void) { int retval; retval = comedi_driver_register(&driver_amplc_dio200); if (retval < 0) return retval; driver_amplc_dio200_pci_driver.name = (char *)driver_amplc_dio200.driver_name; return pci_register_driver(&driver_amplc_dio200_pci_driver); } static void __exit driver_amplc_dio200_cleanup_module(void) { pci_unregister_driver(&driver_amplc_dio200_pci_driver); comedi_driver_unregister(&driver_amplc_dio200); } module_init(driver_amplc_dio200_init_module); module_exit(driver_amplc_dio200_cleanup_module); #else static int __init driver_amplc_dio200_init_module(void) { return comedi_driver_register(&driver_amplc_dio200); } static void __exit driver_amplc_dio200_cleanup_module(void) { comedi_driver_unregister(&driver_amplc_dio200); } module_init(driver_amplc_dio200_init_module); module_exit(driver_amplc_dio200_cleanup_module); #endif /* * This function looks for a PCI device matching the requested board name, * bus and slot. 
*/ #ifdef CONFIG_COMEDI_PCI static int dio200_find_pci(struct comedi_device *dev, int bus, int slot, struct pci_dev **pci_dev_p) { struct pci_dev *pci_dev = NULL; *pci_dev_p = NULL; /* Look for matching PCI device. */ for (pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON, PCI_ANY_ID, NULL); pci_dev != NULL; pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON, PCI_ANY_ID, pci_dev)) { /* If bus/slot specified, check them. */ if (bus || slot) { if (bus != pci_dev->bus->number || slot != PCI_SLOT(pci_dev->devfn)) continue; } if (thisboard->model == anypci_model) { /* Match any supported model. */ int i; for (i = 0; i < ARRAY_SIZE(dio200_boards); i++) { if (dio200_boards[i].bustype != pci_bustype) continue; if (pci_dev->device == dio200_boards[i].devid) { /* Change board_ptr to matched board. */ dev->board_ptr = &dio200_boards[i]; break; } } if (i == ARRAY_SIZE(dio200_boards)) continue; } else { /* Match specific model name. */ if (pci_dev->device != thisboard->devid) continue; } /* Found a match. */ *pci_dev_p = pci_dev; return 0; } /* No match found. */ if (bus || slot) { printk(KERN_ERR "comedi%d: error! no %s found at pci %02x:%02x!\n", dev->minor, thisboard->name, bus, slot); } else { printk(KERN_ERR "comedi%d: error! no %s found!\n", dev->minor, thisboard->name); } return -EIO; } #endif /* * This function checks and requests an I/O region, reporting an error * if there is a conflict. */ static int dio200_request_region(unsigned minor, unsigned long from, unsigned long extent) { if (!from || !request_region(from, extent, DIO200_DRIVER_NAME)) { printk(KERN_ERR "comedi%d: I/O port conflict (%#lx,%lu)!\n", minor, from, extent); return -EIO; } return 0; } /* * 'insn_bits' function for an 'INTERRUPT' subdevice. */ static int dio200_subdev_intr_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dio200_subdev_intr *subpriv = s->private; if (subpriv->has_int_sce) { /* Just read the interrupt status register. 
*/ data[1] = inb(subpriv->iobase) & subpriv->valid_isns; } else { /* No interrupt status register. */ data[0] = 0; } return 2; } /* * Called to stop acquisition for an 'INTERRUPT' subdevice. */ static void dio200_stop_intr(struct comedi_device *dev, struct comedi_subdevice *s) { struct dio200_subdev_intr *subpriv = s->private; subpriv->active = 0; subpriv->enabled_isns = 0; if (subpriv->has_int_sce) outb(0, subpriv->iobase); } /* * Called to start acquisition for an 'INTERRUPT' subdevice. */ static int dio200_start_intr(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned int n; unsigned isn_bits; struct dio200_subdev_intr *subpriv = s->private; struct comedi_cmd *cmd = &s->async->cmd; int retval = 0; if (!subpriv->continuous && subpriv->stopcount == 0) { /* An empty acquisition! */ s->async->events |= COMEDI_CB_EOA; subpriv->active = 0; retval = 1; } else { /* Determine interrupt sources to enable. */ isn_bits = 0; if (cmd->chanlist) { for (n = 0; n < cmd->chanlist_len; n++) isn_bits |= (1U << CR_CHAN(cmd->chanlist[n])); } isn_bits &= subpriv->valid_isns; /* Enable interrupt sources. */ subpriv->enabled_isns = isn_bits; if (subpriv->has_int_sce) outb(isn_bits, subpriv->iobase); } return retval; } /* * Internal trigger function to start acquisition for an 'INTERRUPT' subdevice. */ static int dio200_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { struct dio200_subdev_intr *subpriv; unsigned long flags; int event = 0; if (trignum != 0) return -EINVAL; subpriv = s->private; spin_lock_irqsave(&subpriv->spinlock, flags); s->async->inttrig = NULL; if (subpriv->active) event = dio200_start_intr(dev, s); spin_unlock_irqrestore(&subpriv->spinlock, flags); if (event) comedi_event(dev, s); return 1; } /* * This is called from the interrupt service routine to handle a read * scan on an 'INTERRUPT' subdevice. 
*/ static int dio200_handle_read_intr(struct comedi_device *dev, struct comedi_subdevice *s) { struct dio200_subdev_intr *subpriv = s->private; unsigned triggered; unsigned intstat; unsigned cur_enabled; unsigned int oldevents; unsigned long flags; triggered = 0; spin_lock_irqsave(&subpriv->spinlock, flags); oldevents = s->async->events; if (subpriv->has_int_sce) { /* * Collect interrupt sources that have triggered and disable * them temporarily. Loop around until no extra interrupt * sources have triggered, at which point, the valid part of * the interrupt status register will read zero, clearing the * cause of the interrupt. * * Mask off interrupt sources already seen to avoid infinite * loop in case of misconfiguration. */ cur_enabled = subpriv->enabled_isns; while ((intstat = (inb(subpriv->iobase) & subpriv->valid_isns & ~triggered)) != 0) { triggered |= intstat; cur_enabled &= ~triggered; outb(cur_enabled, subpriv->iobase); } } else { /* * No interrupt status register. Assume the single interrupt * source has triggered. */ triggered = subpriv->enabled_isns; } if (triggered) { /* * Some interrupt sources have triggered and have been * temporarily disabled to clear the cause of the interrupt. * * Reenable them NOW to minimize the time they are disabled. */ cur_enabled = subpriv->enabled_isns; if (subpriv->has_int_sce) outb(cur_enabled, subpriv->iobase); if (subpriv->active) { /* * The command is still active. * * Ignore interrupt sources that the command isn't * interested in (just in case there's a race * condition). */ if (triggered & subpriv->enabled_isns) { /* Collect scan data. */ short val; unsigned int n, ch, len; val = 0; len = s->async->cmd.chanlist_len; for (n = 0; n < len; n++) { ch = CR_CHAN(s->async->cmd.chanlist[n]); if (triggered & (1U << ch)) val |= (1U << n); } /* Write the scan to the buffer. */ if (comedi_buf_put(s->async, val)) { s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS); } else { /* Error! Stop acquisition. 
*/ dio200_stop_intr(dev, s); s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW; comedi_error(dev, "buffer overflow"); } /* Check for end of acquisition. */ if (!subpriv->continuous) { /* stop_src == TRIG_COUNT */ if (subpriv->stopcount > 0) { subpriv->stopcount--; if (subpriv->stopcount == 0) { s->async->events |= COMEDI_CB_EOA; dio200_stop_intr(dev, s); } } } } } } spin_unlock_irqrestore(&subpriv->spinlock, flags); if (oldevents != s->async->events) comedi_event(dev, s); return (triggered != 0); } /* * 'cancel' function for an 'INTERRUPT' subdevice. */ static int dio200_subdev_intr_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct dio200_subdev_intr *subpriv = s->private; unsigned long flags; spin_lock_irqsave(&subpriv->spinlock, flags); if (subpriv->active) dio200_stop_intr(dev, s); spin_unlock_irqrestore(&subpriv->spinlock, flags); return 0; } /* * 'do_cmdtest' function for an 'INTERRUPT' subdevice. */ static int dio200_subdev_intr_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; unsigned int tmp; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= (TRIG_NOW | TRIG_INT); if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= (TRIG_COUNT | TRIG_NONE); if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ /* these tests are true if more than one _src bit is set */ if ((cmd->start_src & (cmd->start_src - 1)) != 0) err++; if ((cmd->scan_begin_src & (cmd->scan_begin_src - 
1)) != 0) err++; if ((cmd->convert_src & (cmd->convert_src - 1)) != 0) err++; if ((cmd->scan_end_src & (cmd->scan_end_src - 1)) != 0) err++; if ((cmd->stop_src & (cmd->stop_src - 1)) != 0) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ /* cmd->start_src == TRIG_NOW || cmd->start_src == TRIG_INT */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } /* cmd->scan_begin_src == TRIG_EXT */ if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } /* cmd->convert_src == TRIG_NOW */ if (cmd->convert_arg != 0) { cmd->convert_arg = 0; err++; } /* cmd->scan_end_src == TRIG_COUNT */ if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } switch (cmd->stop_src) { case TRIG_COUNT: /* any count allowed */ break; case TRIG_NONE: if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } break; default: break; } if (err) return 3; /* step 4: fix up any arguments */ /* if (err) return 4; */ return 0; } /* * 'do_cmd' function for an 'INTERRUPT' subdevice. */ static int dio200_subdev_intr_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; struct dio200_subdev_intr *subpriv = s->private; unsigned long flags; int event = 0; spin_lock_irqsave(&subpriv->spinlock, flags); subpriv->active = 1; /* Set up end of acquisition. */ switch (cmd->stop_src) { case TRIG_COUNT: subpriv->continuous = 0; subpriv->stopcount = cmd->stop_arg; break; default: /* TRIG_NONE */ subpriv->continuous = 1; subpriv->stopcount = 0; break; } /* Set up start of acquisition. */ switch (cmd->start_src) { case TRIG_INT: s->async->inttrig = dio200_inttrig_start_intr; break; default: /* TRIG_NOW */ event = dio200_start_intr(dev, s); break; } spin_unlock_irqrestore(&subpriv->spinlock, flags); if (event) comedi_event(dev, s); return 0; } /* * This function initializes an 'INTERRUPT' subdevice. 
*/ static int dio200_subdev_intr_init(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long iobase, unsigned valid_isns, int has_int_sce) { struct dio200_subdev_intr *subpriv; subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL); if (!subpriv) { printk(KERN_ERR "comedi%d: error! out of memory!\n", dev->minor); return -ENOMEM; } subpriv->iobase = iobase; subpriv->has_int_sce = has_int_sce; subpriv->valid_isns = valid_isns; spin_lock_init(&subpriv->spinlock); if (has_int_sce) outb(0, subpriv->iobase); /* Disable interrupt sources. */ s->private = subpriv; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_CMD_READ; if (has_int_sce) { s->n_chan = DIO200_MAX_ISNS; s->len_chanlist = DIO200_MAX_ISNS; } else { /* No interrupt source register. Support single channel. */ s->n_chan = 1; s->len_chanlist = 1; } s->range_table = &range_digital; s->maxdata = 1; s->insn_bits = dio200_subdev_intr_insn_bits; s->do_cmdtest = dio200_subdev_intr_cmdtest; s->do_cmd = dio200_subdev_intr_cmd; s->cancel = dio200_subdev_intr_cancel; return 0; } /* * This function cleans up an 'INTERRUPT' subdevice. */ static void dio200_subdev_intr_cleanup(struct comedi_device *dev, struct comedi_subdevice *s) { struct dio200_subdev_intr *subpriv = s->private; kfree(subpriv); } /* * Interrupt service routine. */ static irqreturn_t dio200_interrupt(int irq, void *d) { struct comedi_device *dev = d; int handled; if (!dev->attached) return IRQ_NONE; if (devpriv->intr_sd >= 0) { handled = dio200_handle_read_intr(dev, dev->subdevices + devpriv->intr_sd); } else { handled = 0; } return IRQ_RETVAL(handled); } /* * Handle 'insn_read' for an '8254' counter subdevice. 
*/ static int dio200_subdev_8254_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dio200_subdev_8254 *subpriv = s->private; int chan = CR_CHAN(insn->chanspec); unsigned long flags; spin_lock_irqsave(&subpriv->spinlock, flags); data[0] = i8254_read(subpriv->iobase, 0, chan); spin_unlock_irqrestore(&subpriv->spinlock, flags); return 1; } /* * Handle 'insn_write' for an '8254' counter subdevice. */ static int dio200_subdev_8254_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dio200_subdev_8254 *subpriv = s->private; int chan = CR_CHAN(insn->chanspec); unsigned long flags; spin_lock_irqsave(&subpriv->spinlock, flags); i8254_write(subpriv->iobase, 0, chan, data[0]); spin_unlock_irqrestore(&subpriv->spinlock, flags); return 1; } /* * Set gate source for an '8254' counter subdevice channel. */ static int dio200_set_gate_src(struct dio200_subdev_8254 *subpriv, unsigned int counter_number, unsigned int gate_src) { unsigned char byte; if (!subpriv->has_clk_gat_sce) return -1; if (counter_number > 2) return -1; if (gate_src > 7) return -1; subpriv->gate_src[counter_number] = gate_src; byte = GAT_SCE(subpriv->which, counter_number, gate_src); outb(byte, subpriv->gat_sce_iobase); return 0; } /* * Get gate source for an '8254' counter subdevice channel. */ static int dio200_get_gate_src(struct dio200_subdev_8254 *subpriv, unsigned int counter_number) { if (!subpriv->has_clk_gat_sce) return -1; if (counter_number > 2) return -1; return subpriv->gate_src[counter_number]; } /* * Set clock source for an '8254' counter subdevice channel. 
*/ static int dio200_set_clock_src(struct dio200_subdev_8254 *subpriv, unsigned int counter_number, unsigned int clock_src) { unsigned char byte; if (!subpriv->has_clk_gat_sce) return -1; if (counter_number > 2) return -1; if (clock_src > 7) return -1; subpriv->clock_src[counter_number] = clock_src; byte = CLK_SCE(subpriv->which, counter_number, clock_src); outb(byte, subpriv->clk_sce_iobase); return 0; } /* * Get clock source for an '8254' counter subdevice channel. */ static int dio200_get_clock_src(struct dio200_subdev_8254 *subpriv, unsigned int counter_number, unsigned int *period_ns) { unsigned clock_src; if (!subpriv->has_clk_gat_sce) return -1; if (counter_number > 2) return -1; clock_src = subpriv->clock_src[counter_number]; *period_ns = clock_period[clock_src]; return clock_src; } /* * Handle 'insn_config' for an '8254' counter subdevice. */ static int dio200_subdev_8254_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct dio200_subdev_8254 *subpriv = s->private; int ret = 0; int chan = CR_CHAN(insn->chanspec); unsigned long flags; spin_lock_irqsave(&subpriv->spinlock, flags); switch (data[0]) { case INSN_CONFIG_SET_COUNTER_MODE: ret = i8254_set_mode(subpriv->iobase, 0, chan, data[1]); if (ret < 0) ret = -EINVAL; break; case INSN_CONFIG_8254_READ_STATUS: data[1] = i8254_status(subpriv->iobase, 0, chan); break; case INSN_CONFIG_SET_GATE_SRC: ret = dio200_set_gate_src(subpriv, chan, data[2]); if (ret < 0) ret = -EINVAL; break; case INSN_CONFIG_GET_GATE_SRC: ret = dio200_get_gate_src(subpriv, chan); if (ret < 0) { ret = -EINVAL; break; } data[2] = ret; break; case INSN_CONFIG_SET_CLOCK_SRC: ret = dio200_set_clock_src(subpriv, chan, data[1]); if (ret < 0) ret = -EINVAL; break; case INSN_CONFIG_GET_CLOCK_SRC: ret = dio200_get_clock_src(subpriv, chan, &data[2]); if (ret < 0) { ret = -EINVAL; break; } data[1] = ret; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&subpriv->spinlock, 
flags); return ret < 0 ? ret : insn->n; } /* * This function initializes an '8254' counter subdevice. * * Note: iobase is the base address of the board, not the subdevice; * offset is the offset to the 8254 chip. */ static int dio200_subdev_8254_init(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long iobase, unsigned offset, int has_clk_gat_sce) { struct dio200_subdev_8254 *subpriv; unsigned int chan; subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL); if (!subpriv) { printk(KERN_ERR "comedi%d: error! out of memory!\n", dev->minor); return -ENOMEM; } s->private = subpriv; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 3; s->maxdata = 0xFFFF; s->insn_read = dio200_subdev_8254_read; s->insn_write = dio200_subdev_8254_write; s->insn_config = dio200_subdev_8254_config; spin_lock_init(&subpriv->spinlock); subpriv->iobase = offset + iobase; subpriv->has_clk_gat_sce = has_clk_gat_sce; if (has_clk_gat_sce) { /* Derive CLK_SCE and GAT_SCE register offsets from * 8254 offset. */ subpriv->clk_sce_iobase = DIO200_XCLK_SCE + (offset >> 3) + iobase; subpriv->gat_sce_iobase = DIO200_XGAT_SCE + (offset >> 3) + iobase; subpriv->which = (offset >> 2) & 1; } /* Initialize channels. */ for (chan = 0; chan < 3; chan++) { i8254_set_mode(subpriv->iobase, 0, chan, I8254_MODE0 | I8254_BINARY); if (subpriv->has_clk_gat_sce) { /* Gate source 0 is VCC (logic 1). */ dio200_set_gate_src(subpriv, chan, 0); /* Clock source 0 is the dedicated clock input. */ dio200_set_clock_src(subpriv, chan, 0); } } return 0; } /* * This function cleans up an '8254' counter subdevice. */ static void dio200_subdev_8254_cleanup(struct comedi_device *dev, struct comedi_subdevice *s) { struct dio200_subdev_intr *subpriv = s->private; kfree(subpriv); } /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. 
*/ static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase = 0; unsigned int irq = 0; #ifdef CONFIG_COMEDI_PCI struct pci_dev *pci_dev = NULL; int bus = 0, slot = 0; #endif const struct dio200_layout_struct *layout; int share_irq = 0; int sdx; unsigned n; int ret; printk(KERN_DEBUG "comedi%d: %s: attach\n", dev->minor, DIO200_DRIVER_NAME); ret = alloc_private(dev, sizeof(struct dio200_private)); if (ret < 0) { printk(KERN_ERR "comedi%d: error! out of memory!\n", dev->minor); return ret; } /* Process options. */ switch (thisboard->bustype) { case isa_bustype: iobase = it->options[0]; irq = it->options[1]; share_irq = 0; break; #ifdef CONFIG_COMEDI_PCI case pci_bustype: bus = it->options[0]; slot = it->options[1]; share_irq = 1; ret = dio200_find_pci(dev, bus, slot, &pci_dev); if (ret < 0) return ret; devpriv->pci_dev = pci_dev; break; #endif default: printk(KERN_ERR "comedi%d: %s: BUG! cannot determine board type!\n", dev->minor, DIO200_DRIVER_NAME); return -EINVAL; break; } devpriv->intr_sd = -1; /* Enable device and reserve I/O spaces. */ #ifdef CONFIG_COMEDI_PCI if (pci_dev) { ret = comedi_pci_enable(pci_dev, DIO200_DRIVER_NAME); if (ret < 0) { printk(KERN_ERR "comedi%d: error! cannot enable PCI device and request regions!\n", dev->minor); return ret; } iobase = pci_resource_start(pci_dev, 2); irq = pci_dev->irq; } else #endif { ret = dio200_request_region(dev->minor, iobase, DIO200_IO_SIZE); if (ret < 0) return ret; } dev->iobase = iobase; layout = thislayout; ret = alloc_subdevices(dev, layout->n_subdevs); if (ret < 0) { printk(KERN_ERR "comedi%d: error! 
out of memory!\n", dev->minor); return ret; } for (n = 0; n < dev->n_subdevices; n++) { s = &dev->subdevices[n]; switch (layout->sdtype[n]) { case sd_8254: /* counter subdevice (8254) */ ret = dio200_subdev_8254_init(dev, s, iobase, layout->sdinfo[n], layout->has_clk_gat_sce); if (ret < 0) return ret; break; case sd_8255: /* digital i/o subdevice (8255) */ ret = subdev_8255_init(dev, s, NULL, iobase + layout->sdinfo[n]); if (ret < 0) return ret; break; case sd_intr: /* 'INTERRUPT' subdevice */ if (irq) { ret = dio200_subdev_intr_init(dev, s, iobase + DIO200_INT_SCE, layout->sdinfo[n], layout-> has_int_sce); if (ret < 0) return ret; devpriv->intr_sd = n; } else { s->type = COMEDI_SUBD_UNUSED; } break; default: s->type = COMEDI_SUBD_UNUSED; break; } } sdx = devpriv->intr_sd; if (sdx >= 0 && sdx < dev->n_subdevices) dev->read_subdev = &dev->subdevices[sdx]; dev->board_name = thisboard->name; if (irq) { unsigned long flags = share_irq ? IRQF_SHARED : 0; if (request_irq(irq, dio200_interrupt, flags, DIO200_DRIVER_NAME, dev) >= 0) { dev->irq = irq; } else { printk(KERN_WARNING "comedi%d: warning! irq %u unavailable!\n", dev->minor, irq); } } printk(KERN_INFO "comedi%d: %s ", dev->minor, dev->board_name); if (thisboard->bustype == isa_bustype) { printk("(base %#lx) ", iobase); } else { #ifdef CONFIG_COMEDI_PCI printk("(pci %s) ", pci_name(pci_dev)); #endif } if (irq) printk("(irq %u%s) ", irq, (dev->irq ? "" : " UNAVAILABLE")); else printk("(no irq) "); printk("attached\n"); return 1; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. 
*/ static int dio200_detach(struct comedi_device *dev) { const struct dio200_layout_struct *layout; unsigned n; printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor, DIO200_DRIVER_NAME); if (dev->irq) free_irq(dev->irq, dev); if (dev->subdevices) { layout = thislayout; for (n = 0; n < dev->n_subdevices; n++) { struct comedi_subdevice *s = &dev->subdevices[n]; switch (layout->sdtype[n]) { case sd_8254: dio200_subdev_8254_cleanup(dev, s); break; case sd_8255: subdev_8255_cleanup(dev, s); break; case sd_intr: dio200_subdev_intr_cleanup(dev, s); break; default: break; } } } if (devpriv) { #ifdef CONFIG_COMEDI_PCI if (devpriv->pci_dev) { if (dev->iobase) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } else #endif { if (dev->iobase) release_region(dev->iobase, DIO200_IO_SIZE); } } if (dev->board_name) printk(KERN_INFO "comedi%d: %s removed\n", dev->minor, dev->board_name); return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
aatjitra/cm12
arch/arm/mach-imx/mx31lite-db.c
5434
5122
/* * LogicPD i.MX31 SOM-LV development board support * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * based on code for other MX31 boards, * * Copyright 2005-2007 Freescale Semiconductor * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> * Copyright (C) 2009 Valentin Longchamp, EPFL Mobots group * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/common.h> #include <mach/iomux-mx3.h> #include <mach/board-mx31lite.h> #include "devices-imx31.h" /* * This file contains board-specific initialization routines for the * LogicPD i.MX31 SOM-LV development board, aka 'LiteKit'. * If you design an own baseboard for the module, use this file as base * for support code. 
*/ static unsigned int litekit_db_board_pins[] __initdata = { /* UART1 */ MX31_PIN_CTS1__CTS1, MX31_PIN_RTS1__RTS1, MX31_PIN_TXD1__TXD1, MX31_PIN_RXD1__RXD1, /* SPI 0 */ MX31_PIN_CSPI1_SCLK__SCLK, MX31_PIN_CSPI1_MOSI__MOSI, MX31_PIN_CSPI1_MISO__MISO, MX31_PIN_CSPI1_SPI_RDY__SPI_RDY, MX31_PIN_CSPI1_SS0__SS0, MX31_PIN_CSPI1_SS1__SS1, MX31_PIN_CSPI1_SS2__SS2, /* SDHC1 */ MX31_PIN_SD1_DATA0__SD1_DATA0, MX31_PIN_SD1_DATA1__SD1_DATA1, MX31_PIN_SD1_DATA2__SD1_DATA2, MX31_PIN_SD1_DATA3__SD1_DATA3, MX31_PIN_SD1_CLK__SD1_CLK, MX31_PIN_SD1_CMD__SD1_CMD, }; /* UART */ static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; /* MMC */ static int gpio_det, gpio_wp; #define MMC_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ PAD_CTL_ODE_CMOS) static int mxc_mmc1_get_ro(struct device *dev) { return gpio_get_value(IOMUX_TO_GPIO(MX31_PIN_GPIO1_6)); } static int mxc_mmc1_init(struct device *dev, irq_handler_t detect_irq, void *data) { int ret; gpio_det = IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1); gpio_wp = IOMUX_TO_GPIO(MX31_PIN_GPIO1_6); mxc_iomux_set_pad(MX31_PIN_SD1_DATA0, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_DATA1, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_DATA2, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_DATA3, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_CMD, MMC_PAD_CFG | PAD_CTL_PUE_PUD | PAD_CTL_100K_PU); mxc_iomux_set_pad(MX31_PIN_SD1_CLK, MMC_PAD_CFG); ret = gpio_request(gpio_det, "MMC detect"); if (ret) return ret; ret = gpio_request(gpio_wp, "MMC w/p"); if (ret) goto exit_free_det; gpio_direction_input(gpio_det); gpio_direction_input(gpio_wp); ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_DCD_DCE1), detect_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "MMC detect", data); if (ret) goto exit_free_wp; return 0; exit_free_wp: gpio_free(gpio_wp); exit_free_det: 
gpio_free(gpio_det); return ret; } static void mxc_mmc1_exit(struct device *dev, void *data) { gpio_free(gpio_det); gpio_free(gpio_wp); free_irq(IOMUX_TO_IRQ(MX31_PIN_DCD_DCE1), data); } static const struct imxmmc_platform_data mmc_pdata __initconst = { .get_ro = mxc_mmc1_get_ro, .init = mxc_mmc1_init, .exit = mxc_mmc1_exit, }; /* SPI */ static int spi_internal_chipselect[] = { MXC_SPI_CS(0), MXC_SPI_CS(1), MXC_SPI_CS(2), }; static const struct spi_imx_master spi0_pdata __initconst = { .chipselect = spi_internal_chipselect, .num_chipselect = ARRAY_SIZE(spi_internal_chipselect), }; /* GPIO LEDs */ static const struct gpio_led litekit_leds[] __initconst = { { .name = "GPIO0", .gpio = IOMUX_TO_GPIO(MX31_PIN_COMPARE), .active_low = 1, .default_state = LEDS_GPIO_DEFSTATE_OFF, }, { .name = "GPIO1", .gpio = IOMUX_TO_GPIO(MX31_PIN_CAPTURE), .active_low = 1, .default_state = LEDS_GPIO_DEFSTATE_OFF, } }; static const struct gpio_led_platform_data litekit_led_platform_data __initconst = { .leds = litekit_leds, .num_leds = ARRAY_SIZE(litekit_leds), }; void __init mx31lite_db_init(void) { mxc_iomux_setup_multiple_pins(litekit_db_board_pins, ARRAY_SIZE(litekit_db_board_pins), "development board pins"); imx31_add_imx_uart0(&uart_pdata); imx31_add_mxc_mmc(0, &mmc_pdata); imx31_add_spi_imx0(&spi0_pdata); gpio_led_register_device(-1, &litekit_led_platform_data); imx31_add_imx2_wdt(NULL); imx31_add_mxc_rtc(NULL); }
gpl-2.0
Docker-J/Sail_STOCK
arch/arm/mach-omap2/clkt2xxx_dpll.c
7994
1448
/* * OMAP2-specific DPLL control functions * * Copyright (C) 2011 Nokia Corporation * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include "clock.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" /* Private functions */ /** * _allow_idle - enable DPLL autoidle bits * @clk: struct clk * of the DPLL to operate on * * Enable DPLL automatic idle control. The DPLL will enter low-power * stop when its downstream clocks are gated. No return value. * REVISIT: DPLL can optionally enter low-power bypass by writing 0x1 * instead. Add some mechanism to optionally enter this mode. */ static void _allow_idle(struct clk *clk) { if (!clk || !clk->dpll_data) return; omap2xxx_cm_set_dpll_auto_low_power_stop(); } /** * _deny_idle - prevent DPLL from automatically idling * @clk: struct clk * of the DPLL to operate on * * Disable DPLL automatic idle control. No return value. */ static void _deny_idle(struct clk *clk) { if (!clk || !clk->dpll_data) return; omap2xxx_cm_set_dpll_disable_autoidle(); } /* Public data */ const struct clkops clkops_omap2xxx_dpll_ops = { .allow_idle = _allow_idle, .deny_idle = _deny_idle, };
gpl-2.0
kalxas/QGIS
external/o2/src/o2facebook.cpp
59
3318
#include <QDebug> #include <QMap> #include <QNetworkReply> #include <QString> #include <QStringList> #include <QUrl> #if QT_VERSION >= 0x050000 #include <QUrlQuery> #endif #include "o2facebook.h" #include "o0globals.h" static const char *FbEndpoint = "https://graph.facebook.com/oauth/authorize?display=touch"; static const char *FbTokenUrl = "https://graph.facebook.com/oauth/access_token"; static const char *FbExpiresKey = "expires"; O2Facebook::O2Facebook(QObject *parent): O2(parent) { setRequestUrl(FbEndpoint); setTokenUrl(FbTokenUrl); } void O2Facebook::onVerificationReceived(const QMap<QString, QString> response) { qDebug() << "O2Facebook::onVerificationReceived: Emitting closeBrowser()"; Q_EMIT closeBrowser(); if (response.contains("error")) { qWarning() << "O2Facebook::onVerificationReceived: Verification failed"; foreach (QString key, response.keys()) { qWarning() << "O2Facebook::onVerificationReceived:" << key << response.value(key); } Q_EMIT linkingFailed(); return; } // Save access code setCode(response.value(O2_OAUTH2_GRANT_TYPE_CODE)); // Exchange access code for access/refresh tokens QUrl url(tokenUrl_); #if QT_VERSION < 0x050000 url.addQueryItem(O2_OAUTH2_CLIENT_ID, clientId_); url.addQueryItem(O2_OAUTH2_CLIENT_SECRET, clientSecret_); url.addQueryItem(O2_OAUTH2_SCOPE, scope_); url.addQueryItem(O2_OAUTH2_GRANT_TYPE_CODE, code()); url.addQueryItem(O2_OAUTH2_REDIRECT_URI, redirectUri_); #else QUrlQuery query(url); query.addQueryItem(O2_OAUTH2_CLIENT_ID, clientId_); query.addQueryItem(O2_OAUTH2_CLIENT_SECRET, clientSecret_); query.addQueryItem(O2_OAUTH2_SCOPE, scope_); query.addQueryItem(O2_OAUTH2_GRANT_TYPE_CODE, code()); query.addQueryItem(O2_OAUTH2_REDIRECT_URI, redirectUri_); url.setQuery(query); #endif QNetworkRequest tokenRequest(url); QNetworkReply *tokenReply = manager_->get(tokenRequest); timedReplies_.add(tokenReply); connect(tokenReply, SIGNAL(finished()), this, SLOT(onTokenReplyFinished()), Qt::QueuedConnection); connect(tokenReply, 
SIGNAL(error(QNetworkReply::NetworkError)), this, SLOT(onTokenReplyError(QNetworkReply::NetworkError)), Qt::QueuedConnection); } void O2Facebook::onTokenReplyFinished() { qDebug() << "O2Facebook::onTokenReplyFinished"; QNetworkReply *tokenReply = qobject_cast<QNetworkReply *>(sender()); if (tokenReply->error() == QNetworkReply::NoError) { // Process reply QByteArray replyData = tokenReply->readAll(); QVariantMap reply; foreach (QString pair, QString(replyData).split("&")) { QStringList kv = pair.split("="); if (kv.length() == 2) { reply.insert(kv[0], kv[1]); } } // Interpret reply setToken(reply.value(O2_OAUTH2_ACCESS_TOKEN, QString()).toString()); setExpires(reply.value(FbExpiresKey).toInt()); setRefreshToken(reply.value(O2_OAUTH2_REFRESH_TOKEN, QString()).toString()); setExtraTokens(reply); timedReplies_.remove(tokenReply); setLinked(true); Q_EMIT linkingSucceeded(); } else { qWarning() << "O2Facebook::onTokenReplyFinished:" << tokenReply->errorString(); } }
gpl-2.0
invisiblek/android_kernel_lge_vs450pp
drivers/input/misc/mpu3050.c
59
24174
/* * MPU3050 Tri-axis gyroscope driver * * Copyright (C) 2011 Wistron Co.Ltd * Joseph Lai <joseph_lai@wistron.com> * * Trimmed down by Alan Cox <alan@linux.intel.com> to produce this version * * This is a 'lite' version of the driver, while we consider the right way * to present the other features to user space. In particular it requires the * device has an IRQ, and it only provides an input interface, so is not much * use for device orientation. A fuller version is available from the Meego * tree. * * This program is based on bma023.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/sensors.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/gpio.h> #include <linux/input/mpu3050.h> #include <linux/regulator/consumer.h> #include <linux/of_gpio.h> #include <mach/gpiomux.h> #define MPU3050_AUTO_DELAY 1000 #define MPU3050_MIN_VALUE -32768 #define MPU3050_MAX_VALUE 32767 #define MPU3050_MIN_POLL_INTERVAL 1 #define MPU3050_MAX_POLL_INTERVAL 250 #define MPU3050_DEFAULT_POLL_INTERVAL 200 #define MPU3050_DEFAULT_FS_RANGE 3 /* Register map */ #define MPU3050_CHIP_ID_REG 0x00 #define MPU3050_SMPLRT_DIV 0x15 #define MPU3050_DLPF_FS_SYNC 0x16 #define MPU3050_INT_CFG 0x17 #define MPU3050_XOUT_H 0x1D #define MPU3050_PWR_MGM 0x3E #define MPU3050_PWR_MGM_POS 6 /* Register bits */ /* DLPF_FS_SYNC */ #define MPU3050_EXT_SYNC_NONE 0x00 #define MPU3050_EXT_SYNC_TEMP 0x20 #define MPU3050_EXT_SYNC_GYROX 0x40 #define MPU3050_EXT_SYNC_GYROY 0x60 #define MPU3050_EXT_SYNC_GYROZ 0x80 #define MPU3050_EXT_SYNC_ACCELX 0xA0 #define MPU3050_EXT_SYNC_ACCELY 0xC0 #define MPU3050_EXT_SYNC_ACCELZ 0xE0 #define MPU3050_EXT_SYNC_MASK 0xE0 #define MPU3050_FS_250DPS 0x00 #define MPU3050_FS_500DPS 0x08 #define MPU3050_FS_1000DPS 0x10 #define MPU3050_FS_2000DPS 0x18 #define MPU3050_FS_MASK 0x18 #define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00 #define MPU3050_DLPF_CFG_188HZ 0x01 #define MPU3050_DLPF_CFG_98HZ 0x02 #define MPU3050_DLPF_CFG_42HZ 0x03 #define MPU3050_DLPF_CFG_20HZ 0x04 #define MPU3050_DLPF_CFG_10HZ 0x05 #define MPU3050_DLPF_CFG_5HZ 0x06 #define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07 #define MPU3050_DLPF_CFG_MASK 0x07 /* INT_CFG */ #define MPU3050_RAW_RDY_EN 0x01 #define MPU3050_MPU_RDY_EN 0x04 #define MPU3050_LATCH_INT_EN 0x20 #define MPU3050_OPEN_DRAIN 0x40 #define MPU3050_ACTIVE_LOW 0x80 /* PWR_MGM */ #define 
MPU3050_PWR_MGM_PLL_X 0x01 #define MPU3050_PWR_MGM_PLL_Y 0x02 #define MPU3050_PWR_MGM_PLL_Z 0x03 #define MPU3050_PWR_MGM_CLKSEL 0x07 #define MPU3050_PWR_MGM_STBY_ZG 0x08 #define MPU3050_PWR_MGM_STBY_YG 0x10 #define MPU3050_PWR_MGM_STBY_XG 0x20 #define MPU3050_PWR_MGM_SLEEP 0x40 #define MPU3050_PWR_MGM_RESET 0x80 #define MPU3050_PWR_MGM_MASK 0x40 struct axis_data { s16 x; s16 y; s16 z; }; struct mpu3050_sensor { struct i2c_client *client; struct device *dev; struct input_dev *idev; struct mpu3050_gyro_platform_data *platform_data; struct delayed_work input_work; u32 use_poll; u32 poll_interval; u32 dlpf_index; u32 enable_gpio; u32 enable; }; static struct sensors_classdev sensors_cdev = { .name = "mpu3050-gyro", .vendor = "Invensense", .version = 1, .handle = SENSORS_GYROSCOPE_HANDLE, .type = SENSOR_TYPE_GYROSCOPE, .max_range = "35.0", .resolution = "0.06", .sensor_power = "0.2", .min_delay = 2000, .fifo_reserved_event_count = 0, .fifo_max_event_count = 0, }; struct sensor_regulator { struct regulator *vreg; const char *name; u32 min_uV; u32 max_uV; }; struct sensor_regulator mpu_vreg[] = { {NULL, "vdd", 2100000, 3600000}, {NULL, "vlogic", 1800000, 1800000}, }; static const int mpu3050_chip_ids[] = { 0x68, 0x69, }; struct dlpf_cfg_tb { u8 cfg; /* cfg index */ u32 lpf_bw; /* low pass filter bandwidth in Hz */ u32 sample_rate; /* analog sample rate in Khz, 1 or 8 */ }; static struct dlpf_cfg_tb dlpf_table[] = { {6, 5, 1}, {5, 10, 1}, {4, 20, 1}, {3, 42, 1}, {2, 98, 1}, {1, 188, 1}, {0, 256, 8}, }; static u8 interval_to_dlpf_cfg(u32 interval) { u32 sample_rate = 1000 / interval; u32 i; /* the filter bandwidth needs to be greater or * equal to half of the sample rate */ for (i = 0; i < sizeof(dlpf_table)/sizeof(dlpf_table[0]); i++) { if (dlpf_table[i].lpf_bw * 2 >= sample_rate) return i; } /* return the maximum possible */ return --i; } static int mpu3050_config_regulator(struct i2c_client *client, bool on) { int rc = 0, i; int num_reg = sizeof(mpu_vreg) / sizeof(struct 
sensor_regulator); if (on) { for (i = 0; i < num_reg; i++) { mpu_vreg[i].vreg = regulator_get(&client->dev, mpu_vreg[i].name); if (IS_ERR(mpu_vreg[i].vreg)) { rc = PTR_ERR(mpu_vreg[i].vreg); pr_err("%s:regulator get failed rc=%d\n", __func__, rc); mpu_vreg[i].vreg = NULL; goto error_vdd; } if (regulator_count_voltages(mpu_vreg[i].vreg) > 0) { rc = regulator_set_voltage(mpu_vreg[i].vreg, mpu_vreg[i].min_uV, mpu_vreg[i].max_uV); if (rc) { pr_err("%s:set_voltage failed rc=%d\n", __func__, rc); regulator_put(mpu_vreg[i].vreg); mpu_vreg[i].vreg = NULL; goto error_vdd; } } rc = regulator_enable(mpu_vreg[i].vreg); if (rc) { pr_err("%s: regulator_enable failed rc =%d\n", __func__, rc); if (regulator_count_voltages( mpu_vreg[i].vreg) > 0) { regulator_set_voltage(mpu_vreg[i].vreg, 0, mpu_vreg[i].max_uV); } regulator_put(mpu_vreg[i].vreg); mpu_vreg[i].vreg = NULL; goto error_vdd; } } return rc; } else { i = num_reg; } error_vdd: while (--i >= 0) { if (!IS_ERR_OR_NULL(mpu_vreg[i].vreg)) { if (regulator_count_voltages( mpu_vreg[i].vreg) > 0) { regulator_set_voltage(mpu_vreg[i].vreg, 0, mpu_vreg[i].max_uV); } regulator_disable(mpu_vreg[i].vreg); regulator_put(mpu_vreg[i].vreg); mpu_vreg[i].vreg = NULL; } } return rc; } /** * mpu3050_attr_get_polling_rate - get the sampling rate */ static ssize_t mpu3050_attr_get_polling_rate(struct device *dev, struct device_attribute *attr, char *buf) { int val; struct mpu3050_sensor *sensor = dev_get_drvdata(dev); val = sensor ? 
sensor->poll_interval : 0; return snprintf(buf, 8, "%d\n", val); } /** * mpu3050_attr_set_polling_rate - set the sampling rate */ static ssize_t mpu3050_attr_set_polling_rate(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct mpu3050_sensor *sensor = dev_get_drvdata(dev); unsigned long interval_ms; unsigned int dlpf_index; u8 divider, reg; int ret; if (kstrtoul(buf, 10, &interval_ms)) return -EINVAL; if ((interval_ms < MPU3050_MIN_POLL_INTERVAL) || (interval_ms > MPU3050_MAX_POLL_INTERVAL)) return -EINVAL; dlpf_index = interval_to_dlpf_cfg(interval_ms); divider = interval_ms * dlpf_table[dlpf_index].sample_rate - 1; if (sensor->dlpf_index != dlpf_index) { /* Set low pass filter and full scale */ reg = dlpf_table[dlpf_index].cfg; reg |= MPU3050_DEFAULT_FS_RANGE << 3; reg |= MPU3050_EXT_SYNC_NONE << 5; ret = i2c_smbus_write_byte_data(sensor->client, MPU3050_DLPF_FS_SYNC, reg); if (ret == 0) sensor->dlpf_index = dlpf_index; } if (sensor->poll_interval != interval_ms) { /* Output frequency divider. The poll interval */ ret = i2c_smbus_write_byte_data(sensor->client, MPU3050_SMPLRT_DIV, divider); if (ret == 0) sensor->poll_interval = interval_ms; } return size; } /** * Set/get enable function is just needed by sensor HAL. */ static ssize_t mpu3050_attr_set_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mpu3050_sensor *sensor = dev_get_drvdata(dev); unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; sensor->enable = (u32)val == 0 ? 
0 : 1; if (sensor->enable) { pm_runtime_get_sync(sensor->dev); gpio_set_value(sensor->enable_gpio, 1); if (sensor->use_poll) schedule_delayed_work(&sensor->input_work, msecs_to_jiffies(sensor->poll_interval)); else { i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG, MPU3050_ACTIVE_LOW | MPU3050_OPEN_DRAIN | MPU3050_RAW_RDY_EN); enable_irq(sensor->client->irq); } } else { if (sensor->use_poll) cancel_delayed_work_sync(&sensor->input_work); else disable_irq(sensor->client->irq); gpio_set_value(sensor->enable_gpio, 0); pm_runtime_put(sensor->dev); } return count; } static ssize_t mpu3050_attr_get_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct mpu3050_sensor *sensor = dev_get_drvdata(dev); return snprintf(buf, 4, "%d\n", sensor->enable); } static struct device_attribute attributes[] = { __ATTR(pollrate_ms, 0664, mpu3050_attr_get_polling_rate, mpu3050_attr_set_polling_rate), __ATTR(enable, 0644, mpu3050_attr_get_enable, mpu3050_attr_set_enable), }; static int create_sysfs_interfaces(struct device *dev) { int i; int err; for (i = 0; i < ARRAY_SIZE(attributes); i++) { err = device_create_file(dev, attributes + i); if (err) goto error; } return 0; error: for ( ; i >= 0; i--) device_remove_file(dev, attributes + i); dev_err(dev, "%s:Unable to create interface\n", __func__); return err; } static int remove_sysfs_interfaces(struct device *dev) { int i; for (i = 0; i < ARRAY_SIZE(attributes); i++) device_remove_file(dev, attributes + i); return 0; } /** * mpu3050_xyz_read_reg - read the axes values * @buffer: provide register addr and get register * @length: length of register * * Reads the register values in one transaction or returns a negative * error code on failure. */ static int mpu3050_xyz_read_reg(struct i2c_client *client, u8 *buffer, int length) { /* * Annoying we can't make this const because the i2c layer doesn't * declare input buffers const. 
*/ char cmd = MPU3050_XOUT_H; struct i2c_msg msg[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = &cmd, }, { .addr = client->addr, .flags = I2C_M_RD, .len = length, .buf = buffer, }, }; return i2c_transfer(client->adapter, msg, 2); } /** * mpu3050_read_xyz - get co-ordinates from device * @client: i2c address of sensor * @coords: co-ordinates to update * * Return the converted X Y and Z co-ordinates from the sensor device */ static void mpu3050_read_xyz(struct i2c_client *client, struct axis_data *coords) { u16 buffer[3]; mpu3050_xyz_read_reg(client, (u8 *)buffer, 6); coords->x = be16_to_cpu(buffer[0]); coords->y = be16_to_cpu(buffer[1]); coords->z = be16_to_cpu(buffer[2]); dev_dbg(&client->dev, "%s: x %d, y %d, z %d\n", __func__, coords->x, coords->y, coords->z); } /** * mpu3050_set_power_mode - set the power mode * @client: i2c client for the sensor * @val: value to switch on/off of power, 1: normal power, 0: low power * * Put device to normal-power mode or low-power mode. */ static void mpu3050_set_power_mode(struct i2c_client *client, u8 val) { u8 value; struct mpu3050_sensor *sensor = i2c_get_clientdata(client); if (val) { mpu3050_config_regulator(client, 1); udelay(10); gpio_set_value(sensor->enable_gpio, 1); msleep(60); } value = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM); value = (value & ~MPU3050_PWR_MGM_MASK) | (((val << MPU3050_PWR_MGM_POS) & MPU3050_PWR_MGM_MASK) ^ MPU3050_PWR_MGM_MASK); i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, value); if (!val) { udelay(10); gpio_set_value(sensor->enable_gpio, 0); udelay(10); mpu3050_config_regulator(client, 0); } } /** * mpu3050_input_open - called on input event open * @input: input dev of opened device * * The input layer calls this function when input event is opened. The * function will push the device to resume. Then, the device is ready * to provide data. 
*/ static int mpu3050_input_open(struct input_dev *input) { struct mpu3050_sensor *sensor = input_get_drvdata(input); int error; pm_runtime_get_sync(sensor->dev); /* Enable interrupts */ error = i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG, MPU3050_ACTIVE_LOW | MPU3050_OPEN_DRAIN | MPU3050_RAW_RDY_EN); if (error < 0) { pm_runtime_put(sensor->dev); return error; } if (sensor->use_poll) schedule_delayed_work(&sensor->input_work, msecs_to_jiffies(sensor->poll_interval)); return 0; } /** * mpu3050_input_close - called on input event close * @input: input dev of closed device * * The input layer calls this function when input event is closed. The * function will push the device to suspend. */ static void mpu3050_input_close(struct input_dev *input) { struct mpu3050_sensor *sensor = input_get_drvdata(input); if (sensor->use_poll) cancel_delayed_work_sync(&sensor->input_work); pm_runtime_put(sensor->dev); } /** * mpu3050_interrupt_thread - handle an IRQ * @irq: interrupt numner * @data: the sensor * * Called by the kernel single threaded after an interrupt occurs. Read * the sensor data and generate an input event for it. 
*/ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data) { struct mpu3050_sensor *sensor = data; struct axis_data axis; mpu3050_read_xyz(sensor->client, &axis); input_report_abs(sensor->idev, ABS_X, axis.x); input_report_abs(sensor->idev, ABS_Y, axis.y); input_report_abs(sensor->idev, ABS_Z, axis.z); input_sync(sensor->idev); return IRQ_HANDLED; } /** * mpu3050_input_work_fn - polling work * @work: the work struct * * Called by the work queue; read sensor data and generate an input * event */ static void mpu3050_input_work_fn(struct work_struct *work) { struct mpu3050_sensor *sensor; struct axis_data axis; sensor = container_of((struct delayed_work *)work, struct mpu3050_sensor, input_work); mpu3050_read_xyz(sensor->client, &axis); input_report_abs(sensor->idev, ABS_X, axis.x); input_report_abs(sensor->idev, ABS_Y, axis.y); input_report_abs(sensor->idev, ABS_Z, axis.z); input_sync(sensor->idev); if (sensor->use_poll) schedule_delayed_work(&sensor->input_work, msecs_to_jiffies(sensor->poll_interval)); } /** * mpu3050_hw_init - initialize hardware * @sensor: the sensor * * Called during device probe; configures the sampling method. */ static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor) { struct i2c_client *client = sensor->client; int ret; u8 reg; /* Reset */ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, MPU3050_PWR_MGM_RESET); if (ret < 0) return ret; ret = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM); if (ret < 0) return ret; ret &= ~MPU3050_PWR_MGM_CLKSEL; ret |= MPU3050_PWR_MGM_PLL_Z; ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, ret); if (ret < 0) return ret; /* Output frequency divider. 
The poll interval */ ret = i2c_smbus_write_byte_data(client, MPU3050_SMPLRT_DIV, sensor->poll_interval - 1); if (ret < 0) return ret; /* Set low pass filter and full scale */ reg = MPU3050_DLPF_CFG_42HZ; reg |= MPU3050_DEFAULT_FS_RANGE << 3; reg |= MPU3050_EXT_SYNC_NONE << 5; ret = i2c_smbus_write_byte_data(client, MPU3050_DLPF_FS_SYNC, reg); if (ret < 0) return ret; return 0; } #ifdef CONFIG_OF static int mpu3050_parse_dt(struct device *dev, struct mpu3050_gyro_platform_data *pdata) { int rc = 0; rc = of_property_read_u32(dev->of_node, "invn,poll-interval", &pdata->poll_interval); if (rc) { dev_err(dev, "Failed to read poll-interval\n"); return rc; } /* check gpio_int later, if it is invalid, just use poll */ pdata->gpio_int = of_get_named_gpio_flags(dev->of_node, "invn,gpio-int", 0, NULL); pdata->gpio_en = of_get_named_gpio_flags(dev->of_node, "invn,gpio-en", 0, NULL); if (!gpio_is_valid(pdata->gpio_en)) return -EINVAL; return 0; } #else static int mpu3050_parse_dt(struct device *dev, struct mpu3050_gyro_platform_data *pdata) { return -EINVAL; } #endif /** * mpu3050_probe - device detection callback * @client: i2c client of found device * @id: id match information * * The I2C layer calls us when it believes a sensor is present at this * address. Probe to see if this is correct and to validate the device. * * If present install the relevant sysfs interfaces and input device. 
*/ static int __devinit mpu3050_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct mpu3050_sensor *sensor; struct input_dev *idev; struct mpu3050_gyro_platform_data *pdata; int ret; int error; u32 i; sensor = kzalloc(sizeof(struct mpu3050_sensor), GFP_KERNEL); idev = input_allocate_device(); if (!sensor || !idev) { dev_err(&client->dev, "failed to allocate driver data\n"); error = -ENOMEM; goto err_free_mem; } sensor->client = client; sensor->dev = &client->dev; sensor->idev = idev; i2c_set_clientdata(client, sensor); if (client->dev.of_node) { pdata = devm_kzalloc(&client->dev, sizeof(struct mpu3050_gyro_platform_data), GFP_KERNEL); if (!pdata) { dev_err(&client->dev, "Failed to allcated memory\n"); error = -ENOMEM; goto err_free_mem; } ret = mpu3050_parse_dt(&client->dev, pdata); if (ret) { dev_err(&client->dev, "Failed to parse device tree\n"); error = ret; goto err_free_mem; } } else pdata = client->dev.platform_data; sensor->platform_data = pdata; if (sensor->platform_data) { u32 interval = sensor->platform_data->poll_interval; sensor->enable_gpio = sensor->platform_data->gpio_en; if ((interval < MPU3050_MIN_POLL_INTERVAL) || (interval > MPU3050_MAX_POLL_INTERVAL)) sensor->poll_interval = MPU3050_DEFAULT_POLL_INTERVAL; else sensor->poll_interval = interval; } else { sensor->poll_interval = MPU3050_DEFAULT_POLL_INTERVAL; sensor->enable_gpio = -EINVAL; } if (gpio_is_valid(sensor->enable_gpio)) { ret = gpio_request(sensor->enable_gpio, "GYRO_EN_PM"); gpio_direction_output(sensor->enable_gpio, 1); } mpu3050_set_power_mode(client, 1); ret = i2c_smbus_read_byte_data(client, MPU3050_CHIP_ID_REG); if (ret < 0) { dev_err(&client->dev, "failed to detect device\n"); error = -ENXIO; goto err_free_mem; } for (i = 0; i < ARRAY_SIZE(mpu3050_chip_ids); i++) if (ret == mpu3050_chip_ids[i]) break; if (i == ARRAY_SIZE(mpu3050_chip_ids)) { dev_err(&client->dev, "unsupported chip id\n"); error = -ENXIO; goto err_free_mem; } idev->name = "MPU3050"; 
idev->id.bustype = BUS_I2C; idev->open = mpu3050_input_open; idev->close = mpu3050_input_close; input_set_capability(idev, EV_ABS, ABS_MISC); input_set_abs_params(idev, ABS_X, MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); input_set_abs_params(idev, ABS_Y, MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); input_set_abs_params(idev, ABS_Z, MPU3050_MIN_VALUE, MPU3050_MAX_VALUE, 0, 0); input_set_drvdata(idev, sensor); pm_runtime_set_active(&client->dev); error = mpu3050_hw_init(sensor); if (error) goto err_pm_set_suspended; if (client->irq == 0) { sensor->use_poll = 1; INIT_DELAYED_WORK(&sensor->input_work, mpu3050_input_work_fn); } else { sensor->use_poll = 0; if (gpio_is_valid(sensor->platform_data->gpio_int)) { /* configure interrupt gpio */ ret = gpio_request(sensor->platform_data->gpio_int, "gyro_gpio_int"); if (ret) { pr_err("%s: unable to request interrupt gpio %d\n", __func__, sensor->platform_data->gpio_int); goto err_pm_set_suspended; } ret = gpio_direction_input( sensor->platform_data->gpio_int); if (ret) { pr_err("%s: unable to set direction for gpio %d\n", __func__, sensor->platform_data->gpio_int); goto err_free_gpio; } client->irq = gpio_to_irq( sensor->platform_data->gpio_int); } else { ret = -EINVAL; goto err_pm_set_suspended; } error = request_threaded_irq(client->irq, NULL, mpu3050_interrupt_thread, IRQF_TRIGGER_FALLING, "mpu3050", sensor); if (error) { dev_err(&client->dev, "can't get IRQ %d, error %d\n", client->irq, error); goto err_pm_set_suspended; } disable_irq(client->irq); } error = input_register_device(idev); if (error) { dev_err(&client->dev, "failed to register input device\n"); goto err_free_irq; } error = sensors_classdev_register(&client->dev, &sensors_cdev); if (error < 0) { dev_err(&client->dev, "failed to create class device\n"); goto err_input_cleanup; } error = create_sysfs_interfaces(&idev->dev); if (error < 0) { dev_err(&client->dev, "failed to create sysfs\n"); goto err_class_sysfs; } pm_runtime_enable(&client->dev); 
pm_runtime_set_autosuspend_delay(&client->dev, MPU3050_AUTO_DELAY); return 0; err_class_sysfs: sensors_classdev_unregister(&sensors_cdev); err_input_cleanup: input_unregister_device(idev); err_free_irq: if (client->irq > 0) free_irq(client->irq, sensor); err_free_gpio: if ((client->irq > 0) && (gpio_is_valid(sensor->platform_data->gpio_int))) gpio_free(sensor->platform_data->gpio_int); err_pm_set_suspended: pm_runtime_set_suspended(&client->dev); err_free_mem: input_free_device(idev); kfree(sensor); return error; } /** * mpu3050_remove - remove a sensor * @client: i2c client of sensor being removed * * Our sensor is going away, clean up the resources. */ static int __devexit mpu3050_remove(struct i2c_client *client) { struct mpu3050_sensor *sensor = i2c_get_clientdata(client); pm_runtime_disable(&client->dev); pm_runtime_set_suspended(&client->dev); if (client->irq) free_irq(client->irq, sensor); remove_sysfs_interfaces(&client->dev); if (gpio_is_valid(sensor->enable_gpio)) gpio_free(sensor->enable_gpio); input_unregister_device(sensor->idev); kfree(sensor); return 0; } #ifdef CONFIG_PM /** * mpu3050_suspend - called on device suspend * @dev: device being suspended * * Put the device into sleep mode before we suspend the machine. */ static int mpu3050_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mpu3050_sensor *sensor = i2c_get_clientdata(client); if (!sensor->use_poll) disable_irq(client->irq); mpu3050_set_power_mode(client, 0); return 0; } /** * mpu3050_resume - called on device resume * @dev: device being resumed * * Put the device into powered mode on resume. 
*/ static int mpu3050_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mpu3050_sensor *sensor = i2c_get_clientdata(client); mpu3050_set_power_mode(client, 1); if (!sensor->use_poll) enable_irq(client->irq); return 0; } #endif static UNIVERSAL_DEV_PM_OPS(mpu3050_pm, mpu3050_suspend, mpu3050_resume, NULL); static const struct i2c_device_id mpu3050_ids[] = { { "mpu3050", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mpu3050_ids); static const struct of_device_id mpu3050_of_match[] = { { .compatible = "invn,mpu3050", }, { }, }; MODULE_DEVICE_TABLE(of, mpu3050_of_match); static struct i2c_driver mpu3050_i2c_driver = { .driver = { .name = "mpu3050", .owner = THIS_MODULE, .pm = &mpu3050_pm, .of_match_table = mpu3050_of_match, }, .probe = mpu3050_probe, .remove = __devexit_p(mpu3050_remove), .id_table = mpu3050_ids, }; module_i2c_driver(mpu3050_i2c_driver); MODULE_AUTHOR("Wistron Corp."); MODULE_DESCRIPTION("MPU3050 Tri-axis gyroscope driver"); MODULE_LICENSE("GPL");
gpl-2.0
oldzhu/linux
drivers/infiniband/hw/hns/hns_roce_cq.c
59
12518
/* * Copyright (c) 2016 Hisilicon Limited. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/platform_device.h> #include <rdma/ib_umem.h> #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" #include <rdma/hns-abi.h> #include "hns_roce_common.h" static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq) { struct ib_cq *ibcq = &hr_cq->ib_cq; ibcq->comp_handler(ibcq, ibcq->cq_context); } static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq, enum hns_roce_event event_type) { struct hns_roce_dev *hr_dev; struct ib_event event; struct ib_cq *ibcq; ibcq = &hr_cq->ib_cq; hr_dev = to_hr_dev(ibcq->device); if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID && event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR && event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) { dev_err(&hr_dev->pdev->dev, "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n", event_type, hr_cq->cqn); return; } if (ibcq->event_handler) { event.device = ibcq->device; event.event = IB_EVENT_CQ_ERR; event.element.cq = ibcq; ibcq->event_handler(&event, ibcq->cq_context); } } static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev, struct hns_roce_cmd_mailbox *mailbox, unsigned long cq_num) { return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0, HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS); } static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, struct hns_roce_mtt *hr_mtt, struct hns_roce_uar *hr_uar, struct hns_roce_cq *hr_cq, int vector) { struct hns_roce_cmd_mailbox *mailbox = NULL; struct hns_roce_cq_table *cq_table = NULL; struct device *dev = &hr_dev->pdev->dev; dma_addr_t dma_handle; u64 *mtts = NULL; int ret = 0; cq_table = &hr_dev->cq_table; /* Get the physical address of cq buf */ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table, hr_mtt->first_seg, &dma_handle); if (!mtts) { dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n"); return -EINVAL; } if (vector >= hr_dev->caps.num_comp_vectors) { dev_err(dev, "CQ alloc.Invalid vector.\n"); return -EINVAL; } hr_cq->vector = vector; ret = 
hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn); if (ret == -1) { dev_err(dev, "CQ alloc.Failed to alloc index.\n"); return -ENOMEM; } /* Get CQC memory HEM(Hardware Entry Memory) table */ ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn); if (ret) { dev_err(dev, "CQ alloc.Failed to get context mem.\n"); goto err_out; } /* The cq insert radix tree */ spin_lock_irq(&cq_table->lock); /* Radix_tree: The associated pointer and long integer key value like */ ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq); spin_unlock_irq(&cq_table->lock); if (ret) { dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n"); goto err_put; } /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) { ret = PTR_ERR(mailbox); goto err_radix; } hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle, nent, vector); /* Send mailbox to hw */ ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) { dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n"); goto err_radix; } hr_cq->cons_index = 0; hr_cq->uar = hr_uar; atomic_set(&hr_cq->refcount, 1); init_completion(&hr_cq->free); return 0; err_radix: spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, hr_cq->cqn); spin_unlock_irq(&cq_table->lock); err_put: hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); err_out: hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR); return ret; } static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev, struct hns_roce_cmd_mailbox *mailbox, unsigned long cq_num) { return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num, mailbox ? 
0 : 1, HNS_ROCE_CMD_HW2SW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS); } void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) { struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct device *dev = &hr_dev->pdev->dev; int ret; ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn); if (ret) dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret, hr_cq->cqn); /* Waiting interrupt process procedure carried out */ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq); /* wait for all interrupt processed */ if (atomic_dec_and_test(&hr_cq->refcount)) complete(&hr_cq->free); wait_for_completion(&hr_cq->free); spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, hr_cq->cqn); spin_unlock_irq(&cq_table->lock); hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR); } static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev, struct ib_ucontext *context, struct hns_roce_cq_buf *buf, struct ib_umem **umem, u64 buf_addr, int cqe) { int ret; *umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(*umem)) return PTR_ERR(*umem); ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem), ilog2((unsigned int)(*umem)->page_size), &buf->hr_mtt); if (ret) goto err_buf; ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem); if (ret) goto err_mtt; return 0; err_mtt: hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt); err_buf: ib_umem_release(*umem); return ret; } static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq_buf *buf, u32 nent) { int ret; ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz, PAGE_SIZE * 2, &buf->hr_buf); if (ret) goto out; ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages, buf->hr_buf.page_shift, &buf->hr_mtt); if (ret) goto err_buf; ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf); if (ret) goto err_mtt; return 0; err_mtt: 
hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt); err_buf: hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz, &buf->hr_buf); out: return ret; } static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq_buf *buf, int cqe) { hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz, &buf->hr_buf); } struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct device *dev = &hr_dev->pdev->dev; struct hns_roce_ib_create_cq ucmd; struct hns_roce_cq *hr_cq = NULL; struct hns_roce_uar *uar = NULL; int vector = attr->comp_vector; int cq_entries = attr->cqe; int ret = 0; if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", cq_entries, hr_dev->caps.max_cqes); return ERR_PTR(-EINVAL); } hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL); if (!hr_cq) return ERR_PTR(-ENOMEM); /* In v1 engine, parameter verification */ if (cq_entries < HNS_ROCE_MIN_CQE_NUM) cq_entries = HNS_ROCE_MIN_CQE_NUM; cq_entries = roundup_pow_of_two((unsigned int)cq_entries); hr_cq->ib_cq.cqe = cq_entries - 1; spin_lock_init(&hr_cq->lock); if (context) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "Failed to copy_from_udata.\n"); ret = -EFAULT; goto err_cq; } /* Get user space address, write it into mtt table */ ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf, &hr_cq->umem, ucmd.buf_addr, cq_entries); if (ret) { dev_err(dev, "Failed to get_cq_umem.\n"); goto err_cq; } /* Get user space parameters */ uar = &to_hr_ucontext(context)->uar; } else { /* Init mmt table and write buff address to mtt table */ ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries); if (ret) { dev_err(dev, "Failed to alloc_cq_buf.\n"); goto err_cq; } uar = &hr_dev->priv_uar; hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG + 0x1000 * 
uar->index; } /* Allocate cq index, fill cq_context */ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar, hr_cq, vector); if (ret) { dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); goto err_mtt; } /* * For the QP created by kernel space, tptr value should be initialized * to zero; For the QP created by user space, it will cause synchronous * problems if tptr is set to zero here, so we initialze it in user * space. */ if (!context) *hr_cq->tptr_addr = 0; /* Get created cq handler and carry out event */ hr_cq->comp = hns_roce_ib_cq_comp; hr_cq->event = hns_roce_ib_cq_event; hr_cq->cq_depth = cq_entries; if (context) { if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) { ret = -EFAULT; goto err_cqc; } } return &hr_cq->ib_cq; err_cqc: hns_roce_free_cq(hr_dev, hr_cq); err_mtt: hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); if (context) ib_umem_release(hr_cq->umem); else hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe); err_cq: kfree(hr_cq); return ERR_PTR(ret); } int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) { struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); int ret = 0; if (hr_dev->hw->destroy_cq) { ret = hr_dev->hw->destroy_cq(ib_cq); } else { hns_roce_free_cq(hr_dev, hr_cq); hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); if (ib_cq->uobject) ib_umem_release(hr_cq->umem); else /* Free the buff of stored cq */ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe); kfree(hr_cq); } return ret; } void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) { struct device *dev = &hr_dev->pdev->dev; struct hns_roce_cq *cq; cq = radix_tree_lookup(&hr_dev->cq_table.tree, cqn & (hr_dev->caps.num_cqs - 1)); if (!cq) { dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn); return; } cq->comp(cq); } void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type) { struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct device *dev = 
&hr_dev->pdev->dev; struct hns_roce_cq *cq; cq = radix_tree_lookup(&cq_table->tree, cqn & (hr_dev->caps.num_cqs - 1)); if (cq) atomic_inc(&cq->refcount); if (!cq) { dev_warn(dev, "Async event for bogus CQ %08x\n", cqn); return; } cq->event(cq, (enum hns_roce_event)event_type); if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); } int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev) { struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; spin_lock_init(&cq_table->lock); INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs, hr_dev->caps.num_cqs - 1, hr_dev->caps.reserved_cqs, 0); } void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev) { hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap); }
gpl-2.0
ivkos/kernel_i9500
drivers/power/android_battery.c
59
22007
/* * android_battery.c * Android Battery Driver * * Copyright (C) 2012 Google, Inc. * Copyright (C) 2012 Samsung Electronics * * Based on work by himihee.seo@samsung.com, ms925.kim@samsung.com, and * joshua.chang@samsung.com. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/types.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/wakelock.h> #include <linux/workqueue.h> #include <linux/alarmtimer.h> #include <linux/timer.h> #include <linux/mutex.h> #include <linux/debugfs.h> #include <linux/platform_data/android_battery.h> #define FAST_POLL (1 * 60) #define SLOW_POLL (10 * 60) struct android_bat_data { struct android_bat_platform_data *pdata; struct android_bat_callbacks callbacks; struct device *dev; struct power_supply psy_bat; struct power_supply psy_usb; struct power_supply psy_ac; struct wake_lock monitor_wake_lock; struct wake_lock charger_wake_lock; int charge_source; int batt_temp; int batt_current; unsigned int batt_health; unsigned int batt_vcell; unsigned int batt_soc; unsigned int charging_status; bool recharging; unsigned long charging_start_time; struct workqueue_struct *monitor_wqueue; struct work_struct monitor_work; struct work_struct charger_work; struct alarm monitor_alarm; ktime_t last_poll; struct dentry *debugfs_entry; }; static char *supply_list[] = { "android-battery", }; static enum 
power_supply_property android_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CURRENT_NOW, }; static enum power_supply_property android_power_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static DEFINE_MUTEX(android_bat_state_lock); static void android_bat_update_data(struct android_bat_data *battery); static int android_bat_enable_charging(struct android_bat_data *battery, bool enable); static char *charge_source_str(int charge_source) { switch (charge_source) { case CHARGE_SOURCE_NONE: return "none"; case CHARGE_SOURCE_AC: return "ac"; case CHARGE_SOURCE_USB: return "usb"; default: break; } return "?"; } static int android_bat_get_property(struct power_supply *ps, enum power_supply_property psp, union power_supply_propval *val) { struct android_bat_data *battery = container_of(ps, struct android_bat_data, psy_bat); switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = battery->charging_status; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = battery->batt_health; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_TEMP: val->intval = battery->batt_temp; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = 1; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: android_bat_update_data(battery); val->intval = battery->batt_vcell; if (val->intval == -1) return -EINVAL; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = battery->batt_soc; if (val->intval == -1) return -EINVAL; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; case POWER_SUPPLY_PROP_CURRENT_NOW: android_bat_update_data(battery); val->intval = battery->batt_current; break; default: return -EINVAL; } return 0; } static int android_usb_get_property(struct power_supply *ps, enum power_supply_property psp, union 
power_supply_propval *val) { struct android_bat_data *battery = container_of(ps, struct android_bat_data, psy_usb); if (psp != POWER_SUPPLY_PROP_ONLINE) return -EINVAL; val->intval = (battery->charge_source == CHARGE_SOURCE_USB); return 0; } static int android_ac_get_property(struct power_supply *ps, enum power_supply_property psp, union power_supply_propval *val) { struct android_bat_data *battery = container_of(ps, struct android_bat_data, psy_ac); if (psp != POWER_SUPPLY_PROP_ONLINE) return -EINVAL; val->intval = (battery->charge_source == CHARGE_SOURCE_AC); return 0; } static void android_bat_get_temp(struct android_bat_data *battery) { int batt_temp = 42; /* 4.2C */ int health = battery->batt_health; if (battery->pdata->get_temperature) battery->pdata->get_temperature(&batt_temp); if (battery->charge_source != CHARGE_SOURCE_NONE) { if (batt_temp >= battery->pdata->temp_high_threshold) { if (health != POWER_SUPPLY_HEALTH_OVERHEAT && health != POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) { pr_info("battery overheat (%d>=%d), " \ "charging unavailable\n", batt_temp, battery->pdata->temp_high_threshold); battery->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT; } } else if (batt_temp <= battery->pdata->temp_high_recovery && batt_temp >= battery->pdata->temp_low_recovery) { if (health == POWER_SUPPLY_HEALTH_OVERHEAT || health == POWER_SUPPLY_HEALTH_COLD) { pr_info("battery recovery (%d,%d~%d)," \ "charging available\n", batt_temp, battery->pdata->temp_low_recovery, battery->pdata->temp_high_recovery); battery->batt_health = POWER_SUPPLY_HEALTH_GOOD; } } else if (batt_temp <= battery->pdata->temp_low_threshold) { if (health != POWER_SUPPLY_HEALTH_COLD && health != POWER_SUPPLY_HEALTH_UNSPEC_FAILURE) { pr_info("battery cold (%d <= %d)," \ "charging unavailable\n", batt_temp, battery->pdata->temp_low_threshold); battery->batt_health = POWER_SUPPLY_HEALTH_COLD; } } } battery->batt_temp = batt_temp; } /* * android_bat_state_lock not held, may call back into * 
android_bat_charge_source_changed. Gathering data here can be * non-atomic; updating our state based on the data may need to be * atomic. */ static void android_bat_update_data(struct android_bat_data *battery) { int ret; int v; if (battery->pdata->poll_charge_source) battery->charge_source = battery->pdata->poll_charge_source(); if (battery->pdata->get_voltage_now) { ret = battery->pdata->get_voltage_now(); battery->batt_vcell = ret >= 0 ? ret : 4242000; } if (battery->pdata->get_capacity) { ret = battery->pdata->get_capacity(); battery->batt_soc = ret >= 0 ? ret : 42; } if (battery->pdata->get_current_now) { ret = battery->pdata->get_current_now(&v); if (!ret) battery->batt_current = v; } android_bat_get_temp(battery); } static void android_bat_set_charge_time(struct android_bat_data *battery, bool enable) { if (enable && !battery->charging_start_time) { struct timespec cur_time; get_monotonic_boottime(&cur_time); /* record start time for charge timeout timer */ battery->charging_start_time = cur_time.tv_sec; } else if (!enable) { /* clear charge timeout timer */ battery->charging_start_time = 0; } } static int android_bat_enable_charging(struct android_bat_data *battery, bool enable) { if (enable && (battery->batt_health != POWER_SUPPLY_HEALTH_GOOD)) { battery->charging_status = POWER_SUPPLY_STATUS_NOT_CHARGING; return -EPERM; } if (enable) { if (battery->pdata && battery->pdata->set_charging_current) battery->pdata->set_charging_current (battery->charge_source); } if (battery->pdata && battery->pdata->set_charging_enable) battery->pdata->set_charging_enable(enable); android_bat_set_charge_time(battery, enable); pr_info("battery: enable=%d charger: %s\n", enable, charge_source_str(battery->charge_source)); return 0; } static bool android_bat_charge_timeout(struct android_bat_data *battery, unsigned long timeout) { struct timespec cur_time; if (!battery->charging_start_time) return 0; get_monotonic_boottime(&cur_time); pr_debug("%s: Start time: %ld, End time: 
%ld, current time: %ld\n", __func__, battery->charging_start_time, battery->charging_start_time + timeout, cur_time.tv_sec); return cur_time.tv_sec >= battery->charging_start_time + timeout; } static void android_bat_charging_timer(struct android_bat_data *battery) { if (!battery->charging_start_time && battery->charging_status == POWER_SUPPLY_STATUS_CHARGING) { android_bat_enable_charging(battery, true); battery->recharging = true; pr_debug("%s: charge status charging but timer is expired\n", __func__); } else if (battery->charging_start_time == 0) { pr_debug("%s: charging_start_time never initialized\n", __func__); return; } if (android_bat_charge_timeout( battery, battery->recharging ? battery->pdata->recharging_time : battery->pdata->full_charging_time)) { android_bat_enable_charging(battery, false); if (battery->batt_vcell > battery->pdata->recharging_voltage && battery->batt_soc == 100) battery->charging_status = POWER_SUPPLY_STATUS_FULL; battery->recharging = false; battery->charging_start_time = 0; pr_info("battery: charging timer expired\n"); } return; } static void android_bat_charge_source_changed(struct android_bat_callbacks *ptr, int charge_source) { struct android_bat_data *battery = container_of(ptr, struct android_bat_data, callbacks); wake_lock(&battery->charger_wake_lock); mutex_lock(&android_bat_state_lock); battery->charge_source = charge_source; pr_info("battery: charge source type was changed: %s\n", charge_source_str(battery->charge_source)); mutex_unlock(&android_bat_state_lock); queue_work(battery->monitor_wqueue, &battery->charger_work); } static void android_bat_set_full_status(struct android_bat_callbacks *ptr) { struct android_bat_data *battery = container_of(ptr, struct android_bat_data, callbacks); mutex_lock(&android_bat_state_lock); pr_info("battery: battery full\n"); battery->charging_status = POWER_SUPPLY_STATUS_FULL; android_bat_enable_charging(battery, false); battery->recharging = false; mutex_unlock(&android_bat_state_lock); 
power_supply_changed(&battery->psy_bat); } static void android_bat_charger_work(struct work_struct *work) { struct android_bat_data *battery = container_of(work, struct android_bat_data, charger_work); mutex_lock(&android_bat_state_lock); switch (battery->charge_source) { case CHARGE_SOURCE_NONE: battery->charging_status = POWER_SUPPLY_STATUS_DISCHARGING; android_bat_enable_charging(battery, false); battery->batt_health = POWER_SUPPLY_HEALTH_GOOD; battery->recharging = false; battery->charging_start_time = 0; break; case CHARGE_SOURCE_USB: case CHARGE_SOURCE_AC: /* * If charging status indicates a charger was already * connected prior to this and the status is something * other than charging ("full" or "not-charging"), leave * the status alone. */ if (battery->charging_status == POWER_SUPPLY_STATUS_DISCHARGING || battery->charging_status == POWER_SUPPLY_STATUS_UNKNOWN) battery->charging_status = POWER_SUPPLY_STATUS_CHARGING; /* * Don't re-enable charging if the battery is full and we * are not actively re-charging it, or if "not-charging" * status is set. 
*/ if (!((battery->charging_status == POWER_SUPPLY_STATUS_FULL && !battery->recharging) || battery->charging_status == POWER_SUPPLY_STATUS_NOT_CHARGING)) android_bat_enable_charging(battery, true); break; default: pr_err("%s: Invalid charger type\n", __func__); break; } mutex_unlock(&android_bat_state_lock); wake_lock_timeout(&battery->charger_wake_lock, HZ * 2); power_supply_changed(&battery->psy_ac); power_supply_changed(&battery->psy_usb); } static void android_bat_monitor_set_alarm(struct android_bat_data *battery, int seconds) { alarm_start(&battery->monitor_alarm, ktime_add(battery->last_poll, ktime_set(seconds, 0))); } static void android_bat_monitor_work(struct work_struct *work) { struct android_bat_data *battery = container_of(work, struct android_bat_data, monitor_work); struct timespec cur_time; wake_lock(&battery->monitor_wake_lock); android_bat_update_data(battery); mutex_lock(&android_bat_state_lock); switch (battery->charging_status) { case POWER_SUPPLY_STATUS_FULL: if (battery->batt_vcell < battery->pdata->recharging_voltage && !battery->recharging) { battery->recharging = true; android_bat_enable_charging(battery, true); pr_info("battery: start recharging, v=%d\n", battery->batt_vcell/1000); } break; case POWER_SUPPLY_STATUS_DISCHARGING: break; case POWER_SUPPLY_STATUS_CHARGING: switch (battery->batt_health) { case POWER_SUPPLY_HEALTH_OVERHEAT: case POWER_SUPPLY_HEALTH_COLD: case POWER_SUPPLY_HEALTH_OVERVOLTAGE: case POWER_SUPPLY_HEALTH_DEAD: case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE: battery->charging_status = POWER_SUPPLY_STATUS_NOT_CHARGING; android_bat_enable_charging(battery, false); pr_info("battery: Not charging, health=%d\n", battery->batt_health); break; default: break; } break; case POWER_SUPPLY_STATUS_NOT_CHARGING: if (battery->batt_health == POWER_SUPPLY_HEALTH_GOOD) { pr_info("battery: battery health recovered\n"); if (battery->charge_source != CHARGE_SOURCE_NONE) { android_bat_enable_charging(battery, true); battery->charging_status = 
POWER_SUPPLY_STATUS_CHARGING; } else { battery->charging_status = POWER_SUPPLY_STATUS_DISCHARGING; } } break; default: pr_err("%s: Undefined battery status: %d\n", __func__, battery->charging_status); break; } android_bat_charging_timer(battery); get_monotonic_boottime(&cur_time); pr_info("battery: l=%d v=%d c=%d temp=%s%ld.%ld h=%d st=%d%s ct=%lu type=%s\n", battery->batt_soc, battery->batt_vcell/1000, battery->batt_current, battery->batt_temp < 0 ? "-" : "", abs(battery->batt_temp / 10), abs(battery->batt_temp % 10), battery->batt_health, battery->charging_status, battery->recharging ? "r" : "", battery->charging_start_time ? cur_time.tv_sec - battery->charging_start_time : 0, charge_source_str(battery->charge_source)); mutex_unlock(&android_bat_state_lock); power_supply_changed(&battery->psy_bat); battery->last_poll = ktime_get_boottime(); android_bat_monitor_set_alarm(battery, FAST_POLL); wake_unlock(&battery->monitor_wake_lock); return; } static enum alarmtimer_restart android_bat_monitor_alarm( struct alarm *alarm, ktime_t now) { struct android_bat_data *battery = container_of(alarm, struct android_bat_data, monitor_alarm); wake_lock(&battery->monitor_wake_lock); queue_work(battery->monitor_wqueue, &battery->monitor_work); return ALARMTIMER_NORESTART; } static int android_power_debug_dump(struct seq_file *s, void *unused) { struct android_bat_data *battery = s->private; struct timespec cur_time; android_bat_update_data(battery); get_monotonic_boottime(&cur_time); mutex_lock(&android_bat_state_lock); seq_printf(s, "l=%d v=%d c=%d temp=%s%ld.%ld h=%d st=%d%s ct=%lu type=%s\n", battery->batt_soc, battery->batt_vcell/1000, battery->batt_current, battery->batt_temp < 0 ? "-" : "", abs(battery->batt_temp / 10), abs(battery->batt_temp % 10), battery->batt_health, battery->charging_status, battery->recharging ? "r" : "", battery->charging_start_time ? 
cur_time.tv_sec - battery->charging_start_time : 0, charge_source_str(battery->charge_source)); mutex_unlock(&android_bat_state_lock); return 0; } static int android_power_debug_open(struct inode *inode, struct file *file) { return single_open(file, android_power_debug_dump, inode->i_private); } static const struct file_operations android_power_debug_fops = { .open = android_power_debug_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static __devinit int android_bat_probe(struct platform_device *pdev) { struct android_bat_platform_data *pdata = dev_get_platdata(&pdev->dev); struct android_bat_data *battery; int ret = 0; dev_info(&pdev->dev, "Android Battery Driver\n"); battery = kzalloc(sizeof(*battery), GFP_KERNEL); if (!battery) return -ENOMEM; battery->pdata = pdata; if (!battery->pdata) { pr_err("%s : No platform data\n", __func__); ret = -EINVAL; goto err_pdata; } battery->dev = &pdev->dev; platform_set_drvdata(pdev, battery); battery->batt_health = POWER_SUPPLY_HEALTH_GOOD; battery->psy_bat.name = "android-battery", battery->psy_bat.type = POWER_SUPPLY_TYPE_BATTERY, battery->psy_bat.properties = android_battery_props, battery->psy_bat.num_properties = ARRAY_SIZE(android_battery_props), battery->psy_bat.get_property = android_bat_get_property, battery->psy_usb.name = "android-usb", battery->psy_usb.type = POWER_SUPPLY_TYPE_USB, battery->psy_usb.supplied_to = supply_list, battery->psy_usb.num_supplicants = ARRAY_SIZE(supply_list), battery->psy_usb.properties = android_power_props, battery->psy_usb.num_properties = ARRAY_SIZE(android_power_props), battery->psy_usb.get_property = android_usb_get_property, battery->psy_ac.name = "android-ac", battery->psy_ac.type = POWER_SUPPLY_TYPE_MAINS, battery->psy_ac.supplied_to = supply_list, battery->psy_ac.num_supplicants = ARRAY_SIZE(supply_list), battery->psy_ac.properties = android_power_props, battery->psy_ac.num_properties = ARRAY_SIZE(android_power_props), battery->psy_ac.get_property = 
android_ac_get_property; battery->batt_vcell = -1; battery->batt_soc = -1; wake_lock_init(&battery->monitor_wake_lock, WAKE_LOCK_SUSPEND, "android-battery-monitor"); wake_lock_init(&battery->charger_wake_lock, WAKE_LOCK_SUSPEND, "android-chargerdetect"); ret = power_supply_register(&pdev->dev, &battery->psy_bat); if (ret) { dev_err(battery->dev, "%s: failed to register psy_bat\n", __func__); goto err_psy_bat_reg; } ret = power_supply_register(&pdev->dev, &battery->psy_usb); if (ret) { dev_err(battery->dev, "%s: failed to register psy_usb\n", __func__); goto err_psy_usb_reg; } ret = power_supply_register(&pdev->dev, &battery->psy_ac); if (ret) { dev_err(battery->dev, "%s: failed to register psy_ac\n", __func__); goto err_psy_ac_reg; } battery->monitor_wqueue = alloc_workqueue(dev_name(&pdev->dev), WQ_FREEZABLE, 1); if (!battery->monitor_wqueue) { dev_err(battery->dev, "%s: fail to create workqueue\n", __func__); goto err_wq; } INIT_WORK(&battery->monitor_work, android_bat_monitor_work); INIT_WORK(&battery->charger_work, android_bat_charger_work); battery->callbacks.charge_source_changed = android_bat_charge_source_changed; battery->callbacks.battery_set_full = android_bat_set_full_status; if (battery->pdata && battery->pdata->register_callbacks) battery->pdata->register_callbacks(&battery->callbacks); /* get initial charger status */ if (battery->pdata->poll_charge_source) battery->charge_source = battery->pdata->poll_charge_source(); wake_lock(&battery->charger_wake_lock); queue_work(battery->monitor_wqueue, &battery->charger_work); wake_lock(&battery->monitor_wake_lock); battery->last_poll = ktime_get_boottime(); alarm_init(&battery->monitor_alarm, ALARM_BOOTTIME, android_bat_monitor_alarm); queue_work(battery->monitor_wqueue, &battery->monitor_work); battery->debugfs_entry = debugfs_create_file("android-power", S_IRUGO, NULL, battery, &android_power_debug_fops); if (!battery->debugfs_entry) pr_err("failed to create android-power debugfs entry\n"); return 0; 
err_wq: power_supply_unregister(&battery->psy_ac); err_psy_ac_reg: power_supply_unregister(&battery->psy_usb); err_psy_usb_reg: power_supply_unregister(&battery->psy_bat); err_psy_bat_reg: wake_lock_destroy(&battery->monitor_wake_lock); wake_lock_destroy(&battery->charger_wake_lock); err_pdata: kfree(battery); return ret; } static int __devexit android_bat_remove(struct platform_device *pdev) { struct android_bat_data *battery = platform_get_drvdata(pdev); alarm_cancel(&battery->monitor_alarm); flush_workqueue(battery->monitor_wqueue); destroy_workqueue(battery->monitor_wqueue); power_supply_unregister(&battery->psy_bat); wake_lock_destroy(&battery->monitor_wake_lock); wake_lock_destroy(&battery->charger_wake_lock); debugfs_remove(battery->debugfs_entry); kfree(battery); return 0; } static int android_bat_suspend(struct device *dev) { struct android_bat_data *battery = dev_get_drvdata(dev); cancel_work_sync(&battery->monitor_work); android_bat_monitor_set_alarm( battery, battery->charge_source == CHARGE_SOURCE_NONE ? SLOW_POLL : FAST_POLL); return 0; } static void android_bat_resume(struct device *dev) { struct android_bat_data *battery = dev_get_drvdata(dev); android_bat_monitor_set_alarm(battery, FAST_POLL); return; } static const struct dev_pm_ops android_bat_pm_ops = { .prepare = android_bat_suspend, .complete = android_bat_resume, }; static struct platform_driver android_bat_driver = { .driver = { .name = "android-battery", .owner = THIS_MODULE, .pm = &android_bat_pm_ops, }, .probe = android_bat_probe, .remove = __devexit_p(android_bat_remove), }; static int __init android_bat_init(void) { return platform_driver_register(&android_bat_driver); } static void __exit android_bat_exit(void) { platform_driver_unregister(&android_bat_driver); } late_initcall(android_bat_init); module_exit(android_bat_exit); MODULE_DESCRIPTION("Android battery driver"); MODULE_LICENSE("GPL");
gpl-2.0
Jazz-823/kernel_ayame
drivers/video/msm/mddi_ext.c
315
8657
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <mach/hardware.h> #include <asm/io.h> #include <asm/system.h> #include <asm/mach-types.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <linux/clk.h> #include <mach/clk.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include "msm_fb.h" #include "mddihosti.h" static int mddi_ext_probe(struct platform_device *pdev); static int mddi_ext_remove(struct platform_device *pdev); static int mddi_ext_off(struct platform_device *pdev); static int mddi_ext_on(struct platform_device *pdev); static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST]; static int pdev_list_cnt; static int mddi_ext_suspend(struct platform_device *pdev, pm_message_t state); static int mddi_ext_resume(struct platform_device *pdev); #ifdef CONFIG_HAS_EARLYSUSPEND static void mddi_ext_early_suspend(struct early_suspend *h); static void mddi_ext_early_resume(struct early_suspend *h); #endif static int mddi_ext_runtime_suspend(struct device *dev) { dev_dbg(dev, "pm_runtime: suspending...\n"); return 0; } static int 
mddi_ext_runtime_resume(struct device *dev) { dev_dbg(dev, "pm_runtime: resuming...\n"); return 0; } static int mddi_ext_runtime_idle(struct device *dev) { dev_dbg(dev, "pm_runtime: idling...\n"); return 0; } static struct dev_pm_ops mddi_ext_dev_pm_ops = { .runtime_suspend = mddi_ext_runtime_suspend, .runtime_resume = mddi_ext_runtime_resume, .runtime_idle = mddi_ext_runtime_idle, }; static struct platform_driver mddi_ext_driver = { .probe = mddi_ext_probe, .remove = mddi_ext_remove, #ifndef CONFIG_HAS_EARLYSUSPEND #ifdef CONFIG_PM .suspend = mddi_ext_suspend, .resume = mddi_ext_resume, #endif #endif .resume_early = NULL, .resume = NULL, .shutdown = NULL, .driver = { .name = "mddi_ext", .pm = &mddi_ext_dev_pm_ops, }, }; static struct clk *mddi_ext_clk; static struct clk *mddi_ext_pclk; static struct mddi_platform_data *mddi_ext_pdata; extern int int_mddi_ext_flag; static int mddi_ext_off(struct platform_device *pdev) { int ret = 0; ret = panel_next_off(pdev); mddi_host_stop_ext_display(); pm_runtime_put(&pdev->dev); return ret; } static int mddi_ext_on(struct platform_device *pdev) { int ret = 0; u32 clk_rate; struct msm_fb_data_type *mfd; mfd = platform_get_drvdata(pdev); pm_runtime_get(&pdev->dev); clk_rate = mfd->fbi->var.pixclock; clk_rate = min(clk_rate, mfd->panel_info.clk_max); if (mddi_ext_pdata && mddi_ext_pdata->mddi_sel_clk && mddi_ext_pdata->mddi_sel_clk(&clk_rate)) printk(KERN_ERR "%s: can't select mddi io clk targate rate = %d\n", __func__, clk_rate); if (clk_set_min_rate(mddi_ext_clk, clk_rate) < 0) printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__); mddi_host_start_ext_display(); ret = panel_next_on(pdev); return ret; } static int mddi_ext_resource_initialized; static int mddi_ext_probe(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct platform_device *mdp_dev = NULL; struct msm_fb_panel_data *pdata = NULL; int rc; resource_size_t size ; u32 clk_rate; if ((pdev->id == 0) && (pdev->num_resources >= 0)) { mddi_ext_pdata = 
pdev->dev.platform_data; size = resource_size(&pdev->resource[0]); msm_emdh_base = ioremap(pdev->resource[0].start, size); MSM_FB_INFO("external mddi base address = 0x%x\n", pdev->resource[0].start); if (unlikely(!msm_emdh_base)) return -ENOMEM; mddi_ext_resource_initialized = 1; return 0; } if (!mddi_ext_resource_initialized) return -EPERM; mfd = platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST) return -ENOMEM; mdp_dev = platform_device_alloc("mdp", pdev->id); if (!mdp_dev) return -ENOMEM; /* * link to the latest pdev */ mfd->pdev = mdp_dev; mfd->dest = DISPLAY_EXT_MDDI; /* * alloc panel device data */ if (platform_device_add_data (mdp_dev, pdev->dev.platform_data, sizeof(struct msm_fb_panel_data))) { printk(KERN_ERR "mddi_ext_probe: platform_device_add_data failed!\n"); platform_device_put(mdp_dev); return -ENOMEM; } /* * data chain */ pdata = mdp_dev->dev.platform_data; pdata->on = mddi_ext_on; pdata->off = mddi_ext_off; pdata->next = pdev; /* * get/set panel specific fb info */ mfd->panel_info = pdata->panel_info; mfd->fb_imgType = MDP_RGB_565; clk_rate = mfd->panel_info.clk_max; if (mddi_ext_pdata && mddi_ext_pdata->mddi_sel_clk && mddi_ext_pdata->mddi_sel_clk(&clk_rate)) printk(KERN_ERR "%s: can't select mddi io clk targate rate = %d\n", __func__, clk_rate); if (clk_set_max_rate(mddi_ext_clk, clk_rate) < 0) printk(KERN_ERR "%s: clk_set_max_rate failed\n", __func__); mfd->panel_info.clk_rate = mfd->panel_info.clk_min; /* * set driver data */ platform_set_drvdata(mdp_dev, mfd); rc = pm_runtime_set_active(&pdev->dev); if (rc < 0) printk(KERN_ERR "pm_runtime: fail to set active\n"); rc = 0; pm_runtime_enable(&pdev->dev); /* * register in mdp driver */ rc = platform_device_add(mdp_dev); if (rc) goto mddi_ext_probe_err; pdev_list[pdev_list_cnt++] = pdev; #ifdef CONFIG_HAS_EARLYSUSPEND mfd->mddi_ext_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; 
mfd->mddi_ext_early_suspend.suspend = mddi_ext_early_suspend; mfd->mddi_ext_early_suspend.resume = mddi_ext_early_resume; register_early_suspend(&mfd->mddi_ext_early_suspend); #endif return 0; mddi_ext_probe_err: platform_device_put(mdp_dev); return rc; } static int mddi_ext_is_in_suspend; static int mddi_ext_suspend(struct platform_device *pdev, pm_message_t state) { if (mddi_ext_is_in_suspend) return 0; mddi_ext_is_in_suspend = 1; if (clk_set_min_rate(mddi_ext_clk, 0) < 0) printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__); clk_disable(mddi_ext_clk); if (mddi_ext_pclk) clk_disable(mddi_ext_pclk); disable_irq(INT_MDDI_EXT); return 0; } static int mddi_ext_resume(struct platform_device *pdev) { struct msm_fb_data_type *mfd; mfd = platform_get_drvdata(pdev); if (!mddi_ext_is_in_suspend) return 0; mddi_ext_is_in_suspend = 0; enable_irq(INT_MDDI_EXT); clk_enable(mddi_ext_clk); if (mddi_ext_pclk) clk_enable(mddi_ext_pclk); return 0; } #ifdef CONFIG_HAS_EARLYSUSPEND static void mddi_ext_early_suspend(struct early_suspend *h) { pm_message_t state; struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type, mddi_ext_early_suspend); state.event = PM_EVENT_SUSPEND; mddi_ext_suspend(mfd->pdev, state); } static void mddi_ext_early_resume(struct early_suspend *h) { struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type, mddi_ext_early_suspend); mddi_ext_resume(mfd->pdev); } #endif static int mddi_ext_remove(struct platform_device *pdev) { pm_runtim_disable(&pdev->dev); iounmap(msm_emdh_base); return 0; } static int mddi_ext_register_driver(void) { return platform_driver_register(&mddi_ext_driver); } static int __init mddi_ext_driver_init(void) { int ret; mddi_ext_clk = clk_get(NULL, "emdh_clk"); if (IS_ERR(mddi_ext_clk)) { printk(KERN_ERR "can't find emdh_clk\n"); return PTR_ERR(mddi_ext_clk); } clk_enable(mddi_ext_clk); mddi_ext_pclk = clk_get(NULL, "emdh_pclk"); if (IS_ERR(mddi_ext_pclk)) mddi_ext_pclk = NULL; else 
clk_enable(mddi_ext_pclk); ret = mddi_ext_register_driver(); if (ret) { clk_disable(mddi_ext_clk); clk_put(mddi_ext_clk); if (mddi_ext_pclk) { clk_disable(mddi_ext_pclk); clk_put(mddi_ext_pclk); } printk(KERN_ERR "mddi_ext_register_driver() failed!\n"); return ret; } mddi_init(); return ret; } module_init(mddi_ext_driver_init);
gpl-2.0
UDOOboard/Kernel_Unico
fs/coda/pioctl.c
2363
2241
/* * Pioctl operations for Coda. * Original version: (C) 1996 Peter Braam * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University * * Carnegie Mellon encourages users of this code to contribute improvements * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/namei.h> #include <linux/module.h> #include <asm/uaccess.h> #include <linux/coda.h> #include <linux/coda_psdev.h> #include "coda_linux.h" /* pioctl ops */ static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags); static long coda_pioctl(struct file *filp, unsigned int cmd, unsigned long user_data); /* exported from this file */ const struct inode_operations coda_ioctl_inode_operations = { .permission = coda_ioctl_permission, .setattr = coda_setattr, }; const struct file_operations coda_ioctl_operations = { .owner = THIS_MODULE, .unlocked_ioctl = coda_pioctl, .llseek = noop_llseek, }; /* the coda pioctl inode ops */ static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) { return (mask & MAY_EXEC) ? -EACCES : 0; } static long coda_pioctl(struct file *filp, unsigned int cmd, unsigned long user_data) { struct path path; int error; struct PioctlData data; struct inode *inode = filp->f_dentry->d_inode; struct inode *target_inode = NULL; struct coda_inode_info *cnp; /* get the Pioctl data arguments from user space */ if (copy_from_user(&data, (void __user *)user_data, sizeof(data))) return -EINVAL; /* * Look up the pathname. 
Note that the pathname is in * user memory, and namei takes care of this */ if (data.follow) error = user_path(data.path, &path); else error = user_lpath(data.path, &path); if (error) return error; target_inode = path.dentry->d_inode; /* return if it is not a Coda inode */ if (target_inode->i_sb != inode->i_sb) { error = -EINVAL; goto out; } /* now proceed to make the upcall */ cnp = ITOC(target_inode); error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data); out: path_put(&path); return error; }
gpl-2.0
nspierbundel/amlogic-common
arch/arm/mach-mxs/devices/platform-fec.c
2363
1315
/* * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <asm/sizes.h> #include <mach/mx28.h> #include <mach/devices-common.h> #define mxs_fec_data_entry_single(soc, _id) \ { \ .id = _id, \ .iobase = soc ## _ENET_MAC ## _id ## _BASE_ADDR, \ .irq = soc ## _INT_ENET_MAC ## _id, \ } #define mxs_fec_data_entry(soc, _id) \ [_id] = mxs_fec_data_entry_single(soc, _id) #ifdef CONFIG_SOC_IMX28 const struct mxs_fec_data mx28_fec_data[] __initconst = { #define mx28_fec_data_entry(_id) \ mxs_fec_data_entry(MX28, _id) mx28_fec_data_entry(0), mx28_fec_data_entry(1), }; #endif struct platform_device *__init mxs_add_fec( const struct mxs_fec_data *data, const struct fec_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + SZ_16K - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return mxs_add_platform_device_dmamask("imx28-fec", data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); }
gpl-2.0
tipnispranav/android-goldfish-3.4-rt
net/netfilter/nf_conntrack_pptp.c
4923
18853
/* * Connection tracking support for PPTP (Point to Point Tunneling Protocol). * PPTP is a a protocol for creating virtual private networks. * It is a specification defined by Microsoft and some vendors * working with Microsoft. PPTP is built on top of a modified * version of the Internet Generic Routing Encapsulation Protocol. * GRE is defined in RFC 1701 and RFC 1702. Documentation of * PPTP can be found in RFC 2637 * * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> * * Development of this code funded by Astaro AG (http://www.astaro.com/) * * Limitations: * - We blindly assume that control connections are always * established in PNS->PAC direction. This is a violation * of RFFC2673 * - We can only support one single call within each session * TODO: * - testing of incoming PPTP calls */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/tcp.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> #include <linux/netfilter/nf_conntrack_pptp.h> #define NF_CT_PPTP_VERSION "3.1" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP"); MODULE_ALIAS("ip_conntrack_pptp"); MODULE_ALIAS_NFCT_HELPER("pptp"); static DEFINE_SPINLOCK(nf_pptp_lock); int (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound); int (*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound); void (*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect 
*expect_orig, struct nf_conntrack_expect *expect_reply) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_exp_gre); void (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp) __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) /* PptpControlMessageType names */ const char *const pptp_msg_name[] = { "UNKNOWN_MESSAGE", "START_SESSION_REQUEST", "START_SESSION_REPLY", "STOP_SESSION_REQUEST", "STOP_SESSION_REPLY", "ECHO_REQUEST", "ECHO_REPLY", "OUT_CALL_REQUEST", "OUT_CALL_REPLY", "IN_CALL_REQUEST", "IN_CALL_REPLY", "IN_CALL_CONNECT", "CALL_CLEAR_REQUEST", "CALL_DISCONNECT_NOTIFY", "WAN_ERROR_NOTIFY", "SET_LINK_INFO" }; EXPORT_SYMBOL(pptp_msg_name); #endif #define SECS *HZ #define MINS * 60 SECS #define HOURS * 60 MINS #define PPTP_GRE_TIMEOUT (10 MINS) #define PPTP_GRE_STREAM_TIMEOUT (5 HOURS) static void pptp_expectfn(struct nf_conn *ct, struct nf_conntrack_expect *exp) { struct net *net = nf_ct_net(ct); typeof(nf_nat_pptp_hook_expectfn) nf_nat_pptp_expectfn; pr_debug("increasing timeouts\n"); /* increase timeout of GRE data channel conntrack entry */ ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT; /* Can you see how rusty this code is, compared with the pre-2.6.11 * one? That's what happened to my shiny newnat of 2002 ;( -HW */ rcu_read_lock(); nf_nat_pptp_expectfn = rcu_dereference(nf_nat_pptp_hook_expectfn); if (nf_nat_pptp_expectfn && ct->master->status & IPS_NAT_MASK) nf_nat_pptp_expectfn(ct, exp); else { struct nf_conntrack_tuple inv_t; struct nf_conntrack_expect *exp_other; /* obviously this tuple inversion only works until you do NAT */ nf_ct_invert_tuplepr(&inv_t, &exp->tuple); pr_debug("trying to unexpect other dir: "); nf_ct_dump_tuple(&inv_t); exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t); if (exp_other) { /* delete other expectation. 
*/ pr_debug("found\n"); nf_ct_unexpect_related(exp_other); nf_ct_expect_put(exp_other); } else { pr_debug("not found\n"); } } rcu_read_unlock(); } static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct, const struct nf_conntrack_tuple *t) { const struct nf_conntrack_tuple_hash *h; struct nf_conntrack_expect *exp; struct nf_conn *sibling; u16 zone = nf_ct_zone(ct); pr_debug("trying to timeout ct or exp for tuple "); nf_ct_dump_tuple(t); h = nf_conntrack_find_get(net, zone, t); if (h) { sibling = nf_ct_tuplehash_to_ctrack(h); pr_debug("setting timeout of conntrack %p to 0\n", sibling); sibling->proto.gre.timeout = 0; sibling->proto.gre.stream_timeout = 0; if (del_timer(&sibling->timeout)) sibling->timeout.function((unsigned long)sibling); nf_ct_put(sibling); return 1; } else { exp = nf_ct_expect_find_get(net, zone, t); if (exp) { pr_debug("unexpect_related of expect %p\n", exp); nf_ct_unexpect_related(exp); nf_ct_expect_put(exp); return 1; } } return 0; } /* timeout GRE data connections */ static void pptp_destroy_siblings(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); const struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_tuple t; nf_ct_gre_keymap_destroy(ct); /* try original (pns->pac) tuple */ memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t)); t.dst.protonum = IPPROTO_GRE; t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; if (!destroy_sibling_or_exp(net, ct, &t)) pr_debug("failed to timeout original pns->pac ct/exp\n"); /* try reply (pac->pns) tuple */ memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); t.dst.protonum = IPPROTO_GRE; t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; if (!destroy_sibling_or_exp(net, ct, &t)) pr_debug("failed to timeout reply pac->pns ct/exp\n"); } /* expect GRE connections (PNS->PAC and PAC->PNS direction) */ static int exp_gre(struct nf_conn *ct, __be16 
callid, __be16 peer_callid) { struct nf_conntrack_expect *exp_orig, *exp_reply; enum ip_conntrack_dir dir; int ret = 1; typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre; exp_orig = nf_ct_expect_alloc(ct); if (exp_orig == NULL) goto out; exp_reply = nf_ct_expect_alloc(ct); if (exp_reply == NULL) goto out_put_orig; /* original direction, PNS->PAC */ dir = IP_CT_DIR_ORIGINAL; nf_ct_expect_init(exp_orig, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[dir].tuple.dst.u3, IPPROTO_GRE, &peer_callid, &callid); exp_orig->expectfn = pptp_expectfn; /* reply direction, PAC->PNS */ dir = IP_CT_DIR_REPLY; nf_ct_expect_init(exp_reply, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), &ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[dir].tuple.dst.u3, IPPROTO_GRE, &callid, &peer_callid); exp_reply->expectfn = pptp_expectfn; nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) nf_nat_pptp_exp_gre(exp_orig, exp_reply); if (nf_ct_expect_related(exp_orig) != 0) goto out_put_both; if (nf_ct_expect_related(exp_reply) != 0) goto out_unexpect_orig; /* Add GRE keymap entries */ if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &exp_orig->tuple) != 0) goto out_unexpect_both; if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &exp_reply->tuple) != 0) { nf_ct_gre_keymap_destroy(ct); goto out_unexpect_both; } ret = 0; out_put_both: nf_ct_expect_put(exp_reply); out_put_orig: nf_ct_expect_put(exp_orig); out: return ret; out_unexpect_both: nf_ct_unexpect_related(exp_reply); out_unexpect_orig: nf_ct_unexpect_related(exp_orig); goto out_put_both; } static inline int pptp_inbound_pkt(struct sk_buff *skb, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq, unsigned int reqlen, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; u_int16_t msg; __be16 cid = 0, pcid = 0; typeof(nf_nat_pptp_hook_inbound) 
nf_nat_pptp_inbound; msg = ntohs(ctlh->messageType); pr_debug("inbound control message %s\n", pptp_msg_name[msg]); switch (msg) { case PPTP_START_SESSION_REPLY: /* server confirms new control session */ if (info->sstate < PPTP_SESSION_REQUESTED) goto invalid; if (pptpReq->srep.resultCode == PPTP_START_OK) info->sstate = PPTP_SESSION_CONFIRMED; else info->sstate = PPTP_SESSION_ERROR; break; case PPTP_STOP_SESSION_REPLY: /* server confirms end of control session */ if (info->sstate > PPTP_SESSION_STOPREQ) goto invalid; if (pptpReq->strep.resultCode == PPTP_STOP_OK) info->sstate = PPTP_SESSION_NONE; else info->sstate = PPTP_SESSION_ERROR; break; case PPTP_OUT_CALL_REPLY: /* server accepted call, we now expect GRE frames */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; if (info->cstate != PPTP_CALL_OUT_REQ && info->cstate != PPTP_CALL_OUT_CONF) goto invalid; cid = pptpReq->ocack.callID; pcid = pptpReq->ocack.peersCallID; if (info->pns_call_id != pcid) goto invalid; pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], ntohs(cid), ntohs(pcid)); if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { info->cstate = PPTP_CALL_OUT_CONF; info->pac_call_id = cid; exp_gre(ct, cid, pcid); } else info->cstate = PPTP_CALL_NONE; break; case PPTP_IN_CALL_REQUEST: /* server tells us about incoming call request */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; cid = pptpReq->icreq.callID; pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); info->cstate = PPTP_CALL_IN_REQ; info->pac_call_id = cid; break; case PPTP_IN_CALL_CONNECT: /* server tells us about incoming call established */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; if (info->cstate != PPTP_CALL_IN_REP && info->cstate != PPTP_CALL_IN_CONF) goto invalid; pcid = pptpReq->iccon.peersCallID; cid = info->pac_call_id; if (info->pns_call_id != pcid) goto invalid; pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); info->cstate = PPTP_CALL_IN_CONF; /* we expect a GRE connection 
from PAC to PNS */ exp_gre(ct, cid, pcid); break; case PPTP_CALL_DISCONNECT_NOTIFY: /* server confirms disconnect */ cid = pptpReq->disc.callID; pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); info->cstate = PPTP_CALL_NONE; /* untrack this call id, unexpect GRE packets */ pptp_destroy_siblings(ct); break; case PPTP_WAN_ERROR_NOTIFY: case PPTP_SET_LINK_INFO: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* I don't have to explain these ;) */ break; default: goto invalid; } nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound); if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK) return nf_nat_pptp_inbound(skb, ct, ctinfo, ctlh, pptpReq); return NF_ACCEPT; invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; } static inline int pptp_outbound_pkt(struct sk_buff *skb, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq, unsigned int reqlen, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; u_int16_t msg; __be16 cid = 0, pcid = 0; typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; msg = ntohs(ctlh->messageType); pr_debug("outbound control message %s\n", pptp_msg_name[msg]); switch (msg) { case PPTP_START_SESSION_REQUEST: /* client requests for new control session */ if (info->sstate != PPTP_SESSION_NONE) goto invalid; info->sstate = PPTP_SESSION_REQUESTED; break; case PPTP_STOP_SESSION_REQUEST: /* client requests end of control session */ info->sstate = PPTP_SESSION_STOPREQ; break; case PPTP_OUT_CALL_REQUEST: /* client initiating connection to server */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; info->cstate = PPTP_CALL_OUT_REQ; /* track PNS call id */ cid = pptpReq->ocreq.callID; pr_debug("%s, CID=%X\n", 
pptp_msg_name[msg], ntohs(cid)); info->pns_call_id = cid; break; case PPTP_IN_CALL_REPLY: /* client answers incoming call */ if (info->cstate != PPTP_CALL_IN_REQ && info->cstate != PPTP_CALL_IN_REP) goto invalid; cid = pptpReq->icack.callID; pcid = pptpReq->icack.peersCallID; if (info->pac_call_id != pcid) goto invalid; pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], ntohs(cid), ntohs(pcid)); if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { /* part two of the three-way handshake */ info->cstate = PPTP_CALL_IN_REP; info->pns_call_id = cid; } else info->cstate = PPTP_CALL_NONE; break; case PPTP_CALL_CLEAR_REQUEST: /* client requests hangup of call */ if (info->sstate != PPTP_SESSION_CONFIRMED) goto invalid; /* FUTURE: iterate over all calls and check if * call ID is valid. We don't do this without newnat, * because we only know about last call */ info->cstate = PPTP_CALL_CLEAR_REQ; break; case PPTP_SET_LINK_INFO: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* I don't have to explain these ;) */ break; default: goto invalid; } nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound); if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK) return nf_nat_pptp_outbound(skb, ct, ctinfo, ctlh, pptpReq); return NF_ACCEPT; invalid: pr_debug("invalid %s: type=%d cid=%u pcid=%u " "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, ntohs(info->pns_call_id), ntohs(info->pac_call_id)); return NF_ACCEPT; } static const unsigned int pptp_msg_size[] = { [PPTP_START_SESSION_REQUEST] = sizeof(struct PptpStartSessionRequest), [PPTP_START_SESSION_REPLY] = sizeof(struct PptpStartSessionReply), [PPTP_STOP_SESSION_REQUEST] = sizeof(struct PptpStopSessionRequest), [PPTP_STOP_SESSION_REPLY] = sizeof(struct PptpStopSessionReply), [PPTP_OUT_CALL_REQUEST] = sizeof(struct PptpOutCallRequest), [PPTP_OUT_CALL_REPLY] = sizeof(struct PptpOutCallReply), [PPTP_IN_CALL_REQUEST] = sizeof(struct PptpInCallRequest), [PPTP_IN_CALL_REPLY] = sizeof(struct PptpInCallReply), [PPTP_IN_CALL_CONNECT] = sizeof(struct PptpInCallConnected), [PPTP_CALL_CLEAR_REQUEST] = sizeof(struct PptpClearCallRequest), [PPTP_CALL_DISCONNECT_NOTIFY] = sizeof(struct PptpCallDisconnectNotify), [PPTP_WAN_ERROR_NOTIFY] = sizeof(struct PptpWanErrorNotify), [PPTP_SET_LINK_INFO] = sizeof(struct PptpSetLinkInfo), }; /* track caller id inside control connection, call expect_related */ static int conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { int dir = CTINFO2DIR(ctinfo); const struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; const struct tcphdr *tcph; struct tcphdr _tcph; const struct pptp_pkt_hdr *pptph; struct pptp_pkt_hdr _pptph; struct PptpControlHeader _ctlh, *ctlh; union pptp_ctrl_union _pptpReq, *pptpReq; unsigned int tcplen = skb->len - protoff; unsigned int datalen, reqlen, nexthdr_off; int oldsstate, oldcstate; int ret; u_int16_t msg; /* don't do any tracking before tcp handshake complete */ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; nexthdr_off = protoff; tcph = skb_header_pointer(skb, nexthdr_off, sizeof(_tcph), &_tcph); BUG_ON(!tcph); nexthdr_off += tcph->doff * 4; datalen = tcplen - tcph->doff * 4; pptph = 
skb_header_pointer(skb, nexthdr_off, sizeof(_pptph), &_pptph); if (!pptph) { pr_debug("no full PPTP header, can't track\n"); return NF_ACCEPT; } nexthdr_off += sizeof(_pptph); datalen -= sizeof(_pptph); /* if it's not a control message we can't do anything with it */ if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL || ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) { pr_debug("not a control packet\n"); return NF_ACCEPT; } ctlh = skb_header_pointer(skb, nexthdr_off, sizeof(_ctlh), &_ctlh); if (!ctlh) return NF_ACCEPT; nexthdr_off += sizeof(_ctlh); datalen -= sizeof(_ctlh); reqlen = datalen; msg = ntohs(ctlh->messageType); if (msg > 0 && msg <= PPTP_MSG_MAX && reqlen < pptp_msg_size[msg]) return NF_ACCEPT; if (reqlen > sizeof(*pptpReq)) reqlen = sizeof(*pptpReq); pptpReq = skb_header_pointer(skb, nexthdr_off, reqlen, &_pptpReq); if (!pptpReq) return NF_ACCEPT; oldsstate = info->sstate; oldcstate = info->cstate; spin_lock_bh(&nf_pptp_lock); /* FIXME: We just blindly assume that the control connection is always * established from PNS->PAC. 
However, RFC makes no guarantee */ if (dir == IP_CT_DIR_ORIGINAL) /* client -> server (PNS -> PAC) */ ret = pptp_outbound_pkt(skb, ctlh, pptpReq, reqlen, ct, ctinfo); else /* server -> client (PAC -> PNS) */ ret = pptp_inbound_pkt(skb, ctlh, pptpReq, reqlen, ct, ctinfo); pr_debug("sstate: %d->%d, cstate: %d->%d\n", oldsstate, info->sstate, oldcstate, info->cstate); spin_unlock_bh(&nf_pptp_lock); return ret; } static const struct nf_conntrack_expect_policy pptp_exp_policy = { .max_expected = 2, .timeout = 5 * 60, }; /* control protocol helper */ static struct nf_conntrack_helper pptp __read_mostly = { .name = "pptp", .me = THIS_MODULE, .tuple.src.l3num = AF_INET, .tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT), .tuple.dst.protonum = IPPROTO_TCP, .help = conntrack_pptp_help, .destroy = pptp_destroy_siblings, .expect_policy = &pptp_exp_policy, }; static void nf_conntrack_pptp_net_exit(struct net *net) { nf_ct_gre_keymap_flush(net); } static struct pernet_operations nf_conntrack_pptp_net_ops = { .exit = nf_conntrack_pptp_net_exit, }; static int __init nf_conntrack_pptp_init(void) { int rv; rv = nf_conntrack_helper_register(&pptp); if (rv < 0) return rv; rv = register_pernet_subsys(&nf_conntrack_pptp_net_ops); if (rv < 0) nf_conntrack_helper_unregister(&pptp); return rv; } static void __exit nf_conntrack_pptp_fini(void) { nf_conntrack_helper_unregister(&pptp); unregister_pernet_subsys(&nf_conntrack_pptp_net_ops); } module_init(nf_conntrack_pptp_init); module_exit(nf_conntrack_pptp_fini);
gpl-2.0
Grarak/grakernel-msm8930
lib/gen_crc32table.c
4923
3318
#include <stdio.h> #include "../include/generated/autoconf.h" #include "crc32defs.h" #include <inttypes.h> #define ENTRIES_PER_LINE 4 #if CRC_LE_BITS > 8 # define LE_TABLE_ROWS (CRC_LE_BITS/8) # define LE_TABLE_SIZE 256 #else # define LE_TABLE_ROWS 1 # define LE_TABLE_SIZE (1 << CRC_LE_BITS) #endif #if CRC_BE_BITS > 8 # define BE_TABLE_ROWS (CRC_BE_BITS/8) # define BE_TABLE_SIZE 256 #else # define BE_TABLE_ROWS 1 # define BE_TABLE_SIZE (1 << CRC_BE_BITS) #endif static uint32_t crc32table_le[LE_TABLE_ROWS][256]; static uint32_t crc32table_be[BE_TABLE_ROWS][256]; static uint32_t crc32ctable_le[LE_TABLE_ROWS][256]; /** * crc32init_le() - allocate and initialize LE table data * * crc is the crc of the byte i; other entries are filled in based on the * fact that crctable[i^j] = crctable[i] ^ crctable[j]. * */ static void crc32init_le_generic(const uint32_t polynomial, uint32_t (*tab)[256]) { unsigned i, j; uint32_t crc = 1; tab[0][0] = 0; for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) { crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0); for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) tab[0][i + j] = crc ^ tab[0][j]; } for (i = 0; i < LE_TABLE_SIZE; i++) { crc = tab[0][i]; for (j = 1; j < LE_TABLE_ROWS; j++) { crc = tab[0][crc & 0xff] ^ (crc >> 8); tab[j][i] = crc; } } } static void crc32init_le(void) { crc32init_le_generic(CRCPOLY_LE, crc32table_le); } static void crc32cinit_le(void) { crc32init_le_generic(CRC32C_POLY_LE, crc32ctable_le); } /** * crc32init_be() - allocate and initialize BE table data */ static void crc32init_be(void) { unsigned i, j; uint32_t crc = 0x80000000; crc32table_be[0][0] = 0; for (i = 1; i < BE_TABLE_SIZE; i <<= 1) { crc = (crc << 1) ^ ((crc & 0x80000000) ? 
CRCPOLY_BE : 0); for (j = 0; j < i; j++) crc32table_be[0][i + j] = crc ^ crc32table_be[0][j]; } for (i = 0; i < BE_TABLE_SIZE; i++) { crc = crc32table_be[0][i]; for (j = 1; j < BE_TABLE_ROWS; j++) { crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8); crc32table_be[j][i] = crc; } } } static void output_table(uint32_t (*table)[256], int rows, int len, char *trans) { int i, j; for (j = 0 ; j < rows; j++) { printf("{"); for (i = 0; i < len - 1; i++) { if (i % ENTRIES_PER_LINE == 0) printf("\n"); printf("%s(0x%8.8xL), ", trans, table[j][i]); } printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]); } } int main(int argc, char** argv) { printf("/* this file is generated - do not edit */\n\n"); if (CRC_LE_BITS > 1) { crc32init_le(); printf("static const u32 __cacheline_aligned " "crc32table_le[%d][%d] = {", LE_TABLE_ROWS, LE_TABLE_SIZE); output_table(crc32table_le, LE_TABLE_ROWS, LE_TABLE_SIZE, "tole"); printf("};\n"); } if (CRC_BE_BITS > 1) { crc32init_be(); printf("static const u32 __cacheline_aligned " "crc32table_be[%d][%d] = {", BE_TABLE_ROWS, BE_TABLE_SIZE); output_table(crc32table_be, LE_TABLE_ROWS, BE_TABLE_SIZE, "tobe"); printf("};\n"); } if (CRC_LE_BITS > 1) { crc32cinit_le(); printf("static const u32 __cacheline_aligned " "crc32ctable_le[%d][%d] = {", LE_TABLE_ROWS, LE_TABLE_SIZE); output_table(crc32ctable_le, LE_TABLE_ROWS, LE_TABLE_SIZE, "tole"); printf("};\n"); } return 0; }
gpl-2.0
AOKP/kernel_htc_msm8960
drivers/media/video/tlg2300/pd-main.c
4923
12333
/* * device driver for Telegent tlg2300 based TV cards * * Author : * Kang Yong <kangyong@telegent.com> * Zhang Xiaobing <xbzhang@telegent.com> * Huang Shijie <zyziii@telegent.com> or <shijie8@gmail.com> * * (c) 2009 Telegent Systems * (c) 2010 Telegent Systems * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/suspend.h> #include <linux/usb/quirks.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/types.h> #include <linux/firmware.h> #include "vendorcmds.h" #include "pd-common.h" #define VENDOR_ID 0x1B24 #define PRODUCT_ID 0x4001 static struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, PRODUCT_ID, 255, 1, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID, PRODUCT_ID, 255, 1, 1) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); int debug_mode; module_param(debug_mode, int, 0644); MODULE_PARM_DESC(debug_mode, "0 = disable, 1 = enable, 2 = verbose"); static const char *firmware_name = "tlg2300_firmware.bin"; static struct usb_driver poseidon_driver; static LIST_HEAD(pd_device_list); /* * send set request to USB firmware. 
*/ s32 send_set_req(struct poseidon *pd, u8 cmdid, s32 param, s32 *cmd_status) { s32 ret; s8 data[32] = {}; u16 lower_16, upper_16; if (pd->state & POSEIDON_STATE_DISCONNECT) return -ENODEV; mdelay(30); if (param == 0) { upper_16 = lower_16 = 0; } else { /* send 32 bit param as two 16 bit param,little endian */ lower_16 = (unsigned short)(param & 0xffff); upper_16 = (unsigned short)((param >> 16) & 0xffff); } ret = usb_control_msg(pd->udev, usb_rcvctrlpipe(pd->udev, 0), REQ_SET_CMD | cmdid, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, lower_16, upper_16, &data, sizeof(*cmd_status), USB_CTRL_GET_TIMEOUT); if (!ret) { return -ENXIO; } else { /* 1st 4 bytes into cmd_status */ memcpy((char *)cmd_status, &(data[0]), sizeof(*cmd_status)); } return 0; } /* * send get request to Poseidon firmware. */ s32 send_get_req(struct poseidon *pd, u8 cmdid, s32 param, void *buf, s32 *cmd_status, s32 datalen) { s32 ret; s8 data[128] = {}; u16 lower_16, upper_16; if (pd->state & POSEIDON_STATE_DISCONNECT) return -ENODEV; mdelay(30); if (param == 0) { upper_16 = lower_16 = 0; } else { /*send 32 bit param as two 16 bit param, little endian */ lower_16 = (unsigned short)(param & 0xffff); upper_16 = (unsigned short)((param >> 16) & 0xffff); } ret = usb_control_msg(pd->udev, usb_rcvctrlpipe(pd->udev, 0), REQ_GET_CMD | cmdid, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, lower_16, upper_16, &data, (datalen + sizeof(*cmd_status)), USB_CTRL_GET_TIMEOUT); if (ret < 0) { return -ENXIO; } else { /* 1st 4 bytes into cmd_status, remaining data into cmd_data */ memcpy((char *)cmd_status, &data[0], sizeof(*cmd_status)); memcpy((char *)buf, &data[sizeof(*cmd_status)], datalen); } return 0; } static int pm_notifier_block(struct notifier_block *nb, unsigned long event, void *dummy) { struct poseidon *pd = NULL; struct list_head *node, *next; switch (event) { case PM_POST_HIBERNATION: list_for_each_safe(node, next, &pd_device_list) { struct usb_device *udev; struct usb_interface *iface; int rc = 
0; pd = container_of(node, struct poseidon, device_list); udev = pd->udev; iface = pd->interface; /* It will cause the system to reload the firmware */ rc = usb_lock_device_for_reset(udev, iface); if (rc >= 0) { usb_reset_device(udev); usb_unlock_device(udev); } } break; default: break; } log("event :%ld\n", event); return 0; } static struct notifier_block pm_notifer = { .notifier_call = pm_notifier_block, }; int set_tuner_mode(struct poseidon *pd, unsigned char mode) { s32 ret, cmd_status; if (pd->state & POSEIDON_STATE_DISCONNECT) return -ENODEV; ret = send_set_req(pd, TUNE_MODE_SELECT, mode, &cmd_status); if (ret || cmd_status) return -ENXIO; return 0; } void poseidon_delete(struct kref *kref) { struct poseidon *pd = container_of(kref, struct poseidon, kref); if (!pd) return; list_del_init(&pd->device_list); pd_dvb_usb_device_cleanup(pd); /* clean_audio_data(&pd->audio_data);*/ if (pd->udev) { usb_put_dev(pd->udev); pd->udev = NULL; } if (pd->interface) { usb_put_intf(pd->interface); pd->interface = NULL; } kfree(pd); log(); } static int firmware_download(struct usb_device *udev) { int ret = 0, actual_length; const struct firmware *fw = NULL; void *fwbuf = NULL; size_t fwlength = 0, offset; size_t max_packet_size; ret = request_firmware(&fw, firmware_name, &udev->dev); if (ret) { log("download err : %d", ret); return ret; } fwlength = fw->size; fwbuf = kmemdup(fw->data, fwlength, GFP_KERNEL); if (!fwbuf) { ret = -ENOMEM; goto out; } max_packet_size = udev->ep_out[0x1]->desc.wMaxPacketSize; log("\t\t download size : %d", (int)max_packet_size); for (offset = 0; offset < fwlength; offset += max_packet_size) { actual_length = 0; ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01), /* ep 1 */ fwbuf + offset, min(max_packet_size, fwlength - offset), &actual_length, HZ * 10); if (ret) break; } kfree(fwbuf); out: release_firmware(fw); return ret; } static inline struct poseidon *get_pd(struct usb_interface *intf) { return usb_get_intfdata(intf); } #ifdef CONFIG_PM /* 
one-to-one map : poseidon{} <----> usb_device{}'s port */ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev) { pd->portnum = udev->portnum; } static inline int get_autopm_ref(struct poseidon *pd) { return pd->video_data.users + pd->vbi_data.users + pd->audio.users + atomic_read(&pd->dvb_data.users) + pd->radio_data.users; } /* fixup something for poseidon */ static inline struct poseidon *fixup(struct poseidon *pd) { int count; /* old udev and interface have gone, so put back reference . */ count = get_autopm_ref(pd); log("count : %d, ref count : %d", count, get_pm_count(pd)); while (count--) usb_autopm_put_interface(pd->interface); /*usb_autopm_set_interface(pd->interface); */ usb_put_dev(pd->udev); usb_put_intf(pd->interface); log("event : %d\n", pd->msg.event); return pd; } static struct poseidon *find_old_poseidon(struct usb_device *udev) { struct poseidon *pd; list_for_each_entry(pd, &pd_device_list, device_list) { if (pd->portnum == udev->portnum && in_hibernation(pd)) return fixup(pd); } return NULL; } /* Is the card working now ? */ static inline int is_working(struct poseidon *pd) { return get_pm_count(pd) > 0; } static int poseidon_suspend(struct usb_interface *intf, pm_message_t msg) { struct poseidon *pd = get_pd(intf); if (!pd) return 0; if (!is_working(pd)) { if (get_pm_count(pd) <= 0 && !in_hibernation(pd)) { pd->msg.event = PM_EVENT_AUTO_SUSPEND; pd->pm_resume = NULL; /* a good guard */ printk(KERN_DEBUG "\n\t+ TLG2300 auto suspend +\n\n"); } return 0; } pd->msg = msg; /* save it here */ logpm(pd); return pd->pm_suspend ? pd->pm_suspend(pd) : 0; } static int poseidon_resume(struct usb_interface *intf) { struct poseidon *pd = get_pd(intf); if (!pd) return 0; printk(KERN_DEBUG "\n\t ++ TLG2300 resume ++\n\n"); if (!is_working(pd)) { if (PM_EVENT_AUTO_SUSPEND == pd->msg.event) pd->msg = PMSG_ON; return 0; } if (in_hibernation(pd)) { logpm(pd); return 0; } logpm(pd); return pd->pm_resume ? 
pd->pm_resume(pd) : 0; } static void hibernation_resume(struct work_struct *w) { struct poseidon *pd = container_of(w, struct poseidon, pm_work); int count; pd->msg.event = 0; /* clear it here */ pd->state &= ~POSEIDON_STATE_DISCONNECT; /* set the new interface's reference */ count = get_autopm_ref(pd); while (count--) usb_autopm_get_interface(pd->interface); /* resume the context */ logpm(pd); if (pd->pm_resume) pd->pm_resume(pd); } #else /* CONFIG_PM is not enabled: */ static inline struct poseidon *find_old_poseidon(struct usb_device *udev) { return NULL; } static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev) { } #endif static int check_firmware(struct usb_device *udev, int *down_firmware) { void *buf; int ret; struct cmd_firmware_vers_s *cmd_firm; buf = kzalloc(sizeof(*cmd_firm) + sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQ_GET_CMD | GET_FW_ID, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, sizeof(*cmd_firm) + sizeof(u32), USB_CTRL_GET_TIMEOUT); kfree(buf); if (ret < 0) { *down_firmware = 1; return firmware_download(udev); } return 0; } static int poseidon_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct poseidon *pd = NULL; int ret = 0; int new_one = 0; /* download firmware */ check_firmware(udev, &ret); if (ret) return 0; /* Do I recovery from the hibernate ? 
*/ pd = find_old_poseidon(udev); if (!pd) { pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) return -ENOMEM; kref_init(&pd->kref); set_map_flags(pd, udev); new_one = 1; } pd->udev = usb_get_dev(udev); pd->interface = usb_get_intf(interface); usb_set_intfdata(interface, pd); if (new_one) { struct device *dev = &interface->dev; logpm(pd); mutex_init(&pd->lock); /* register v4l2 device */ snprintf(pd->v4l2_dev.name, sizeof(pd->v4l2_dev.name), "%s %s", dev->driver->name, dev_name(dev)); ret = v4l2_device_register(NULL, &pd->v4l2_dev); /* register devices in directory /dev */ ret = pd_video_init(pd); poseidon_audio_init(pd); poseidon_fm_init(pd); pd_dvb_usb_device_init(pd); INIT_LIST_HEAD(&pd->device_list); list_add_tail(&pd->device_list, &pd_device_list); } device_init_wakeup(&udev->dev, 1); #ifdef CONFIG_PM pm_runtime_set_autosuspend_delay(&pd->udev->dev, 1000 * PM_SUSPEND_DELAY); usb_enable_autosuspend(pd->udev); if (in_hibernation(pd)) { INIT_WORK(&pd->pm_work, hibernation_resume); schedule_work(&pd->pm_work); } #endif return 0; } static void poseidon_disconnect(struct usb_interface *interface) { struct poseidon *pd = get_pd(interface); if (!pd) return; logpm(pd); if (in_hibernation(pd)) return; mutex_lock(&pd->lock); pd->state |= POSEIDON_STATE_DISCONNECT; mutex_unlock(&pd->lock); /* stop urb transferring */ stop_all_video_stream(pd); dvb_stop_streaming(&pd->dvb_data); /*unregister v4l2 device */ v4l2_device_unregister(&pd->v4l2_dev); pd_dvb_usb_device_exit(pd); poseidon_fm_exit(pd); poseidon_audio_free(pd); pd_video_exit(pd); usb_set_intfdata(interface, NULL); kref_put(&pd->kref, poseidon_delete); } static struct usb_driver poseidon_driver = { .name = "poseidon", .probe = poseidon_probe, .disconnect = poseidon_disconnect, .id_table = id_table, #ifdef CONFIG_PM .suspend = poseidon_suspend, .resume = poseidon_resume, #endif .supports_autosuspend = 1, }; static int __init poseidon_init(void) { int ret; ret = usb_register(&poseidon_driver); if (ret) return ret; 
register_pm_notifier(&pm_notifer); return ret; } static void __exit poseidon_exit(void) { log(); unregister_pm_notifier(&pm_notifer); usb_deregister(&poseidon_driver); } module_init(poseidon_init); module_exit(poseidon_exit); MODULE_AUTHOR("Telegent Systems"); MODULE_DESCRIPTION("For tlg2300-based USB device "); MODULE_LICENSE("GPL"); MODULE_VERSION("0.0.2");
gpl-2.0
mukulsoni/android_kernel_samsung_ms013g-new
drivers/usb/host/alchemy-common.c
5179
15699
/* * USB block power/access management abstraction. * * Au1000+: The OHCI block control register is at the far end of the OHCI memory * area. Au1550 has OHCI on different base address. No need to handle * UDC here. * Au1200: one register to control access and clocks to O/EHCI, UDC and OTG * as well as the PHY for EHCI and UDC. * */ #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/syscore_ops.h> #include <asm/mach-au1x00/au1000.h> /* control register offsets */ #define AU1000_OHCICFG 0x7fffc #define AU1550_OHCICFG 0x07ffc #define AU1200_USBCFG 0x04 /* Au1000 USB block config bits */ #define USBHEN_RD (1 << 4) /* OHCI reset-done indicator */ #define USBHEN_CE (1 << 3) /* OHCI block clock enable */ #define USBHEN_E (1 << 2) /* OHCI block enable */ #define USBHEN_C (1 << 1) /* OHCI block coherency bit */ #define USBHEN_BE (1 << 0) /* OHCI Big-Endian */ /* Au1200 USB config bits */ #define USBCFG_PFEN (1 << 31) /* prefetch enable (undoc) */ #define USBCFG_RDCOMB (1 << 30) /* read combining (undoc) */ #define USBCFG_UNKNOWN (5 << 20) /* unknown, leave this way */ #define USBCFG_SSD (1 << 23) /* serial short detect en */ #define USBCFG_PPE (1 << 19) /* HS PHY PLL */ #define USBCFG_UCE (1 << 18) /* UDC clock enable */ #define USBCFG_ECE (1 << 17) /* EHCI clock enable */ #define USBCFG_OCE (1 << 16) /* OHCI clock enable */ #define USBCFG_FLA(x) (((x) & 0x3f) << 8) #define USBCFG_UCAM (1 << 7) /* coherent access (undoc) */ #define USBCFG_GME (1 << 6) /* OTG mem access */ #define USBCFG_DBE (1 << 5) /* UDC busmaster enable */ #define USBCFG_DME (1 << 4) /* UDC mem enable */ #define USBCFG_EBE (1 << 3) /* EHCI busmaster enable */ #define USBCFG_EME (1 << 2) /* EHCI mem enable */ #define USBCFG_OBE (1 << 1) /* OHCI busmaster enable */ #define USBCFG_OME (1 << 0) /* OHCI mem enable */ #define USBCFG_INIT_AU1200 (USBCFG_PFEN | USBCFG_RDCOMB | USBCFG_UNKNOWN |\ USBCFG_SSD | USBCFG_FLA(0x20) | USBCFG_UCAM | \ 
USBCFG_GME | USBCFG_DBE | USBCFG_DME | \ USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \ USBCFG_OME) /* Au1300 USB config registers */ #define USB_DWC_CTRL1 0x00 #define USB_DWC_CTRL2 0x04 #define USB_VBUS_TIMER 0x10 #define USB_SBUS_CTRL 0x14 #define USB_MSR_ERR 0x18 #define USB_DWC_CTRL3 0x1C #define USB_DWC_CTRL4 0x20 #define USB_OTG_STATUS 0x28 #define USB_DWC_CTRL5 0x2C #define USB_DWC_CTRL6 0x30 #define USB_DWC_CTRL7 0x34 #define USB_PHY_STATUS 0xC0 #define USB_INT_STATUS 0xC4 #define USB_INT_ENABLE 0xC8 #define USB_DWC_CTRL1_OTGD 0x04 /* set to DISable OTG */ #define USB_DWC_CTRL1_HSTRS 0x02 /* set to ENable EHCI */ #define USB_DWC_CTRL1_DCRS 0x01 /* set to ENable UDC */ #define USB_DWC_CTRL2_PHY1RS 0x04 /* set to enable PHY1 */ #define USB_DWC_CTRL2_PHY0RS 0x02 /* set to enable PHY0 */ #define USB_DWC_CTRL2_PHYRS 0x01 /* set to enable PHY */ #define USB_DWC_CTRL3_OHCI1_CKEN (1 << 19) #define USB_DWC_CTRL3_OHCI0_CKEN (1 << 18) #define USB_DWC_CTRL3_EHCI0_CKEN (1 << 17) #define USB_DWC_CTRL3_OTG0_CKEN (1 << 16) #define USB_SBUS_CTRL_SBCA 0x04 /* coherent access */ #define USB_INTEN_FORCE 0x20 #define USB_INTEN_PHY 0x10 #define USB_INTEN_UDC 0x08 #define USB_INTEN_EHCI 0x04 #define USB_INTEN_OHCI1 0x02 #define USB_INTEN_OHCI0 0x01 static DEFINE_SPINLOCK(alchemy_usb_lock); static inline void __au1300_usb_phyctl(void __iomem *base, int enable) { unsigned long r, s; r = __raw_readl(base + USB_DWC_CTRL2); s = __raw_readl(base + USB_DWC_CTRL3); s &= USB_DWC_CTRL3_OHCI1_CKEN | USB_DWC_CTRL3_OHCI0_CKEN | USB_DWC_CTRL3_EHCI0_CKEN | USB_DWC_CTRL3_OTG0_CKEN; if (enable) { /* simply enable all PHYs */ r |= USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS | USB_DWC_CTRL2_PHYRS; __raw_writel(r, base + USB_DWC_CTRL2); wmb(); } else if (!s) { /* no USB block active, do disable all PHYs */ r &= ~(USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS | USB_DWC_CTRL2_PHYRS); __raw_writel(r, base + USB_DWC_CTRL2); wmb(); } } static inline void __au1300_ohci_control(void __iomem *base, int enable, int 
id) { unsigned long r; if (enable) { __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */ wmb(); r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */ r |= (id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN : USB_DWC_CTRL3_OHCI1_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); /* power up the PHYs */ r = __raw_readl(base + USB_INT_ENABLE); r |= (id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1; __raw_writel(r, base + USB_INT_ENABLE); wmb(); /* reset the OHCI start clock bit */ __raw_writel(0, base + USB_DWC_CTRL7); wmb(); } else { r = __raw_readl(base + USB_INT_ENABLE); r &= ~((id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1); __raw_writel(r, base + USB_INT_ENABLE); wmb(); r = __raw_readl(base + USB_DWC_CTRL3); r &= ~((id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN : USB_DWC_CTRL3_OHCI1_CKEN); __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); } } static inline void __au1300_ehci_control(void __iomem *base, int enable) { unsigned long r; if (enable) { r = __raw_readl(base + USB_DWC_CTRL3); r |= USB_DWC_CTRL3_EHCI0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r |= USB_DWC_CTRL1_HSTRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); __au1300_usb_phyctl(base, enable); r = __raw_readl(base + USB_INT_ENABLE); r |= USB_INTEN_EHCI; __raw_writel(r, base + USB_INT_ENABLE); wmb(); } else { r = __raw_readl(base + USB_INT_ENABLE); r &= ~USB_INTEN_EHCI; __raw_writel(r, base + USB_INT_ENABLE); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r &= ~USB_DWC_CTRL1_HSTRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); r = __raw_readl(base + USB_DWC_CTRL3); r &= ~USB_DWC_CTRL3_EHCI0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); } } static inline void __au1300_udc_control(void __iomem *base, int enable) { unsigned long r; if (enable) { r = __raw_readl(base + USB_DWC_CTRL1); r |= USB_DWC_CTRL1_DCRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); 
__au1300_usb_phyctl(base, enable); r = __raw_readl(base + USB_INT_ENABLE); r |= USB_INTEN_UDC; __raw_writel(r, base + USB_INT_ENABLE); wmb(); } else { r = __raw_readl(base + USB_INT_ENABLE); r &= ~USB_INTEN_UDC; __raw_writel(r, base + USB_INT_ENABLE); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r &= ~USB_DWC_CTRL1_DCRS; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); __au1300_usb_phyctl(base, enable); } } static inline void __au1300_otg_control(void __iomem *base, int enable) { unsigned long r; if (enable) { r = __raw_readl(base + USB_DWC_CTRL3); r |= USB_DWC_CTRL3_OTG0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); r = __raw_readl(base + USB_DWC_CTRL1); r &= ~USB_DWC_CTRL1_OTGD; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); __au1300_usb_phyctl(base, enable); } else { r = __raw_readl(base + USB_DWC_CTRL1); r |= USB_DWC_CTRL1_OTGD; __raw_writel(r, base + USB_DWC_CTRL1); wmb(); r = __raw_readl(base + USB_DWC_CTRL3); r &= ~USB_DWC_CTRL3_OTG0_CKEN; __raw_writel(r, base + USB_DWC_CTRL3); wmb(); __au1300_usb_phyctl(base, enable); } } static inline int au1300_usb_control(int block, int enable) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR); int ret = 0; switch (block) { case ALCHEMY_USB_OHCI0: __au1300_ohci_control(base, enable, 0); break; case ALCHEMY_USB_OHCI1: __au1300_ohci_control(base, enable, 1); break; case ALCHEMY_USB_EHCI0: __au1300_ehci_control(base, enable); break; case ALCHEMY_USB_UDC0: __au1300_udc_control(base, enable); break; case ALCHEMY_USB_OTG0: __au1300_otg_control(base, enable); break; default: ret = -ENODEV; } return ret; } static inline void au1300_usb_init(void) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR); /* set some sane defaults. Note: we don't fiddle with DWC_CTRL4 * here at all: Port 2 routing (EHCI or UDC) must be set either * by boot firmware or platform init code; I can't autodetect * a sane setting. 
*/ __raw_writel(0, base + USB_INT_ENABLE); /* disable all USB irqs */ wmb(); __raw_writel(0, base + USB_DWC_CTRL3); /* disable all clocks */ wmb(); __raw_writel(~0, base + USB_MSR_ERR); /* clear all errors */ wmb(); __raw_writel(~0, base + USB_INT_STATUS); /* clear int status */ wmb(); /* set coherent access bit */ __raw_writel(USB_SBUS_CTRL_SBCA, base + USB_SBUS_CTRL); wmb(); } static inline void __au1200_ohci_control(void __iomem *base, int enable) { unsigned long r = __raw_readl(base + AU1200_USBCFG); if (enable) { __raw_writel(r | USBCFG_OCE, base + AU1200_USBCFG); wmb(); udelay(2000); } else { __raw_writel(r & ~USBCFG_OCE, base + AU1200_USBCFG); wmb(); udelay(1000); } } static inline void __au1200_ehci_control(void __iomem *base, int enable) { unsigned long r = __raw_readl(base + AU1200_USBCFG); if (enable) { __raw_writel(r | USBCFG_ECE | USBCFG_PPE, base + AU1200_USBCFG); wmb(); udelay(1000); } else { if (!(r & USBCFG_UCE)) /* UDC also off? */ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */ __raw_writel(r & ~USBCFG_ECE, base + AU1200_USBCFG); wmb(); udelay(1000); } } static inline void __au1200_udc_control(void __iomem *base, int enable) { unsigned long r = __raw_readl(base + AU1200_USBCFG); if (enable) { __raw_writel(r | USBCFG_UCE | USBCFG_PPE, base + AU1200_USBCFG); wmb(); } else { if (!(r & USBCFG_ECE)) /* EHCI also off? 
*/ r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */ __raw_writel(r & ~USBCFG_UCE, base + AU1200_USBCFG); wmb(); } } static inline int au1200_coherency_bug(void) { #if defined(CONFIG_DMA_COHERENT) /* Au1200 AB USB does not support coherent memory */ if (!(read_c0_prid() & 0xff)) { printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n"); printk(KERN_INFO "Au1200 USB: update your board or re-configure" " the kernel\n"); return -ENODEV; } #endif return 0; } static inline int au1200_usb_control(int block, int enable) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); int ret = 0; switch (block) { case ALCHEMY_USB_OHCI0: ret = au1200_coherency_bug(); if (ret && enable) goto out; __au1200_ohci_control(base, enable); break; case ALCHEMY_USB_UDC0: __au1200_udc_control(base, enable); break; case ALCHEMY_USB_EHCI0: ret = au1200_coherency_bug(); if (ret && enable) goto out; __au1200_ehci_control(base, enable); break; default: ret = -ENODEV; } out: return ret; } /* initialize USB block(s) to a known working state */ static inline void au1200_usb_init(void) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); __raw_writel(USBCFG_INIT_AU1200, base + AU1200_USBCFG); wmb(); udelay(1000); } static inline void au1000_usb_init(unsigned long rb, int reg) { void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg); unsigned long r = __raw_readl(base); #if defined(__BIG_ENDIAN) r |= USBHEN_BE; #endif r |= USBHEN_C; __raw_writel(r, base); wmb(); udelay(1000); } static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg) { void __iomem *base = (void __iomem *)KSEG1ADDR(rb); unsigned long r = __raw_readl(base + creg); if (enable) { __raw_writel(r | USBHEN_CE, base + creg); wmb(); udelay(1000); __raw_writel(r | USBHEN_CE | USBHEN_E, base + creg); wmb(); udelay(1000); /* wait for reset complete (read reg twice: au1500 erratum) */ while (__raw_readl(base + creg), !(__raw_readl(base + creg) & USBHEN_RD)) udelay(1000); } 
else { __raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg); wmb(); } } static inline int au1000_usb_control(int block, int enable, unsigned long rb, int creg) { int ret = 0; switch (block) { case ALCHEMY_USB_OHCI0: __au1xx0_ohci_control(enable, rb, creg); break; default: ret = -ENODEV; } return ret; } /* * alchemy_usb_control - control Alchemy on-chip USB blocks * @block: USB block to target * @enable: set 1 to enable a block, 0 to disable */ int alchemy_usb_control(int block, int enable) { unsigned long flags; int ret; spin_lock_irqsave(&alchemy_usb_lock, flags); switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: ret = au1000_usb_control(block, enable, AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); break; case ALCHEMY_CPU_AU1550: ret = au1000_usb_control(block, enable, AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); break; case ALCHEMY_CPU_AU1200: ret = au1200_usb_control(block, enable); break; case ALCHEMY_CPU_AU1300: ret = au1300_usb_control(block, enable); break; default: ret = -ENODEV; } spin_unlock_irqrestore(&alchemy_usb_lock, flags); return ret; } EXPORT_SYMBOL_GPL(alchemy_usb_control); static unsigned long alchemy_usb_pmdata[2]; static void au1000_usb_pm(unsigned long br, int creg, int susp) { void __iomem *base = (void __iomem *)KSEG1ADDR(br); if (susp) { alchemy_usb_pmdata[0] = __raw_readl(base + creg); /* There appears to be some undocumented reset register.... 
*/ __raw_writel(0, base + 0x04); wmb(); __raw_writel(0, base + creg); wmb(); } else { __raw_writel(alchemy_usb_pmdata[0], base + creg); wmb(); } } static void au1200_usb_pm(int susp) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1200_USB_OTG_PHYS_ADDR); if (susp) { /* save OTG_CAP/MUX registers which indicate port routing */ /* FIXME: write an OTG driver to do that */ alchemy_usb_pmdata[0] = __raw_readl(base + 0x00); alchemy_usb_pmdata[1] = __raw_readl(base + 0x04); } else { /* restore access to all MMIO areas */ au1200_usb_init(); /* restore OTG_CAP/MUX registers */ __raw_writel(alchemy_usb_pmdata[0], base + 0x00); __raw_writel(alchemy_usb_pmdata[1], base + 0x04); wmb(); } } static void au1300_usb_pm(int susp) { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR); /* remember Port2 routing */ if (susp) { alchemy_usb_pmdata[0] = __raw_readl(base + USB_DWC_CTRL4); } else { au1300_usb_init(); __raw_writel(alchemy_usb_pmdata[0], base + USB_DWC_CTRL4); wmb(); } } static void alchemy_usb_pm(int susp) { switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: au1000_usb_pm(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG, susp); break; case ALCHEMY_CPU_AU1550: au1000_usb_pm(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG, susp); break; case ALCHEMY_CPU_AU1200: au1200_usb_pm(susp); break; case ALCHEMY_CPU_AU1300: au1300_usb_pm(susp); break; } } static int alchemy_usb_suspend(void) { alchemy_usb_pm(1); return 0; } static void alchemy_usb_resume(void) { alchemy_usb_pm(0); } static struct syscore_ops alchemy_usb_pm_ops = { .suspend = alchemy_usb_suspend, .resume = alchemy_usb_resume, }; static int __init alchemy_usb_init(void) { switch (alchemy_get_cputype()) { case ALCHEMY_CPU_AU1000: case ALCHEMY_CPU_AU1500: case ALCHEMY_CPU_AU1100: au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); break; case ALCHEMY_CPU_AU1550: au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); break; case 
ALCHEMY_CPU_AU1200: au1200_usb_init(); break; case ALCHEMY_CPU_AU1300: au1300_usb_init(); break; } register_syscore_ops(&alchemy_usb_pm_ops); return 0; } arch_initcall(alchemy_usb_init);
gpl-2.0
eagleeyetom/android_kernel_mediatek
fs/cifs/ioctl.c
7739
2863
/* * fs/cifs/ioctl.c * * vfs operations that deal with io control * * Copyright (C) International Business Machines Corp., 2005,2007 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifsfs.h" #define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2) long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) { struct inode *inode = filep->f_dentry->d_inode; int rc = -ENOTTY; /* strange error - but the precedent */ int xid; struct cifs_sb_info *cifs_sb; #ifdef CONFIG_CIFS_POSIX struct cifsFileInfo *pSMBFile = filep->private_data; struct cifs_tcon *tcon; __u64 ExtAttrBits = 0; __u64 ExtAttrMask = 0; __u64 caps; #endif /* CONFIG_CIFS_POSIX */ xid = GetXid(); cFYI(1, "ioctl file %p cmd %u arg %lu", filep, command, arg); cifs_sb = CIFS_SB(inode->i_sb); switch (command) { case CIFS_IOC_CHECKUMOUNT: cFYI(1, "User unmount attempted"); if (cifs_sb->mnt_uid == current_uid()) rc = 0; else { rc = -EACCES; cFYI(1, "uids do not match"); } break; #ifdef CONFIG_CIFS_POSIX case FS_IOC_GETFLAGS: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); caps = le64_to_cpu(tcon->fsUnixInfo.Capability); if (CIFS_UNIX_EXTATTR_CAP & caps) { rc = 
CIFSGetExtAttr(xid, tcon, pSMBFile->netfid, &ExtAttrBits, &ExtAttrMask); if (rc == 0) rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE, (int __user *)arg); } break; case FS_IOC_SETFLAGS: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); caps = le64_to_cpu(tcon->fsUnixInfo.Capability); if (CIFS_UNIX_EXTATTR_CAP & caps) { if (get_user(ExtAttrBits, (int __user *)arg)) { rc = -EFAULT; break; } /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid, extAttrBits, &ExtAttrMask);*/ } cFYI(1, "set flags not implemented yet"); break; #endif /* CONFIG_CIFS_POSIX */ default: cFYI(1, "unsupported ioctl"); break; } FreeXid(xid); return rc; }
gpl-2.0
AmperificSuperKANG/lge_kernel_loki
fs/cifs/ioctl.c
7739
2863
/* * fs/cifs/ioctl.c * * vfs operations that deal with io control * * Copyright (C) International Business Machines Corp., 2005,2007 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifsfs.h" #define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2) long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) { struct inode *inode = filep->f_dentry->d_inode; int rc = -ENOTTY; /* strange error - but the precedent */ int xid; struct cifs_sb_info *cifs_sb; #ifdef CONFIG_CIFS_POSIX struct cifsFileInfo *pSMBFile = filep->private_data; struct cifs_tcon *tcon; __u64 ExtAttrBits = 0; __u64 ExtAttrMask = 0; __u64 caps; #endif /* CONFIG_CIFS_POSIX */ xid = GetXid(); cFYI(1, "ioctl file %p cmd %u arg %lu", filep, command, arg); cifs_sb = CIFS_SB(inode->i_sb); switch (command) { case CIFS_IOC_CHECKUMOUNT: cFYI(1, "User unmount attempted"); if (cifs_sb->mnt_uid == current_uid()) rc = 0; else { rc = -EACCES; cFYI(1, "uids do not match"); } break; #ifdef CONFIG_CIFS_POSIX case FS_IOC_GETFLAGS: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); caps = le64_to_cpu(tcon->fsUnixInfo.Capability); if (CIFS_UNIX_EXTATTR_CAP & caps) { rc = 
CIFSGetExtAttr(xid, tcon, pSMBFile->netfid, &ExtAttrBits, &ExtAttrMask); if (rc == 0) rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE, (int __user *)arg); } break; case FS_IOC_SETFLAGS: if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); caps = le64_to_cpu(tcon->fsUnixInfo.Capability); if (CIFS_UNIX_EXTATTR_CAP & caps) { if (get_user(ExtAttrBits, (int __user *)arg)) { rc = -EFAULT; break; } /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid, extAttrBits, &ExtAttrMask);*/ } cFYI(1, "set flags not implemented yet"); break; #endif /* CONFIG_CIFS_POSIX */ default: cFYI(1, "unsupported ioctl"); break; } FreeXid(xid); return rc; }
gpl-2.0
wolverine2k/android_kernel_oppo_n1
drivers/infiniband/hw/cxgb3/iwch_ev.c
9787
7025
/* * Copyright (c) 2006 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/gfp.h> #include <linux/mman.h> #include <net/sock.h> #include "iwch_provider.h" #include "iwch.h" #include "iwch_cm.h" #include "cxio_hal.h" #include "cxio_wr.h" static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, struct respQ_msg_t *rsp_msg, enum ib_event_type ib_event, int send_term) { struct ib_event event; struct iwch_qp_attributes attrs; struct iwch_qp *qhp; unsigned long flag; spin_lock(&rnicp->lock); qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); if (!qhp) { printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n", __func__, CQE_STATUS(rsp_msg->cqe), CQE_QPID(rsp_msg->cqe)); spin_unlock(&rnicp->lock); return; } if ((qhp->attr.state == IWCH_QP_STATE_ERROR) || (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) { PDBG("%s AE received after RTS - " "qp state %d qpid 0x%x status 0x%x\n", __func__, qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); spin_unlock(&rnicp->lock); return; } printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x " "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__, CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); atomic_inc(&qhp->refcnt); spin_unlock(&rnicp->lock); if (qhp->attr.state == IWCH_QP_STATE_RTS) { attrs.next_state = IWCH_QP_STATE_TERMINATE; iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 1); if (send_term) iwch_post_terminate(qhp, rsp_msg); } event.event = ib_event; event.device = chp->ibcq.device; if (ib_event == IB_EVENT_CQ_ERR) event.element.cq = &chp->ibcq; else event.element.qp = &qhp->ibqp; if (qhp->ibqp.event_handler) (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); if (atomic_dec_and_test(&qhp->refcnt)) wake_up(&qhp->wait); } void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff 
*skb) { struct iwch_dev *rnicp; struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data; struct iwch_cq *chp; struct iwch_qp *qhp; u32 cqid = RSPQ_CQID(rsp_msg); unsigned long flag; rnicp = (struct iwch_dev *) rdev_p->ulp; spin_lock(&rnicp->lock); chp = get_chp(rnicp, cqid); qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); if (!chp || !qhp) { printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d " "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n", cqid, CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); spin_unlock(&rnicp->lock); goto out; } iwch_qp_add_ref(&qhp->ibqp); atomic_inc(&chp->refcnt); spin_unlock(&rnicp->lock); /* * 1) completion of our sending a TERMINATE. * 2) incoming TERMINATE message. */ if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) && (CQE_STATUS(rsp_msg->cqe) == 0)) { if (SQ_TYPE(rsp_msg->cqe)) { PDBG("%s QPID 0x%x ep %p disconnecting\n", __func__, qhp->wq.qpid, qhp->ep); iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC); } else { PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__, qhp->wq.qpid); post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 0); iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC); } goto done; } /* Bad incoming Read request */ if (SQ_TYPE(rsp_msg->cqe) && (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) { post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1); goto done; } /* Bad incoming write */ if (RQ_TYPE(rsp_msg->cqe) && (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) { post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1); goto done; } switch (CQE_STATUS(rsp_msg->cqe)) { /* Completion Events */ case TPT_ERR_SUCCESS: /* * Confirm the destination entry if this is a RECV completion. 
*/ if (qhp->ep && SQ_TYPE(rsp_msg->cqe)) dst_confirm(qhp->ep->dst); spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); break; case TPT_ERR_STAG: case TPT_ERR_PDID: case TPT_ERR_QPID: case TPT_ERR_ACCESS: case TPT_ERR_WRAP: case TPT_ERR_BOUND: case TPT_ERR_INVALIDATE_SHARED_MR: case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND: post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1); break; /* Device Fatal Errors */ case TPT_ERR_ECC: case TPT_ERR_ECC_PSTAG: case TPT_ERR_INTERNAL_ERR: post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1); break; /* QP Fatal Errors */ case TPT_ERR_OUT_OF_RQE: case TPT_ERR_PBL_ADDR_BOUND: case TPT_ERR_CRC: case TPT_ERR_MARKER: case TPT_ERR_PDU_LEN_ERR: case TPT_ERR_DDP_VERSION: case TPT_ERR_RDMA_VERSION: case TPT_ERR_OPCODE: case TPT_ERR_DDP_QUEUE_NUM: case TPT_ERR_MSN: case TPT_ERR_TBIT: case TPT_ERR_MO: case TPT_ERR_MSN_GAP: case TPT_ERR_MSN_RANGE: case TPT_ERR_RQE_ADDR_BOUND: case TPT_ERR_IRD_OVERFLOW: post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1); break; default: printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n", CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid); post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1); break; } done: if (atomic_dec_and_test(&chp->refcnt)) wake_up(&chp->wait); iwch_qp_rem_ref(&qhp->ibqp); out: dev_kfree_skb_irq(skb); }
gpl-2.0
umang96/Radon
kernel/workqueue.c
60
144932
/* * kernel/workqueue.c - generic async execution with shared worker pool * * Copyright (C) 2002 Ingo Molnar * * Derived from the taskqueue/keventd code by: * David Woodhouse <dwmw2@infradead.org> * Andrew Morton * Kai Petzke <wpp@marie.physik.tu-berlin.de> * Theodore Ts'o <tytso@mit.edu> * * Made to use alloc_percpu by Christoph Lameter. * * Copyright (C) 2010 SUSE Linux Products GmbH * Copyright (C) 2010 Tejun Heo <tj@kernel.org> * * This is the generic async execution mechanism. Work items as are * executed in process context. The worker pool is shared and * automatically managed. There is one worker pool for each CPU and * one extra for works which are better served by workers which are * not bound to any specific CPU. * * Please read Documentation/workqueue.txt for details. */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/completion.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/hardirq.h> #include <linux/mempolicy.h> #include <linux/freezer.h> #include <linux/kallsyms.h> #include <linux/debug_locks.h> #include <linux/lockdep.h> #include <linux/idr.h> #include <linux/jhash.h> #include <linux/hashtable.h> #include <linux/rculist.h> #include <linux/nodemask.h> #include <linux/moduleparam.h> #include <linux/uaccess.h> #include <linux/bug.h> #include "workqueue_internal.h" enum { /* * worker_pool flags * * A bound pool is either associated or disassociated with its CPU. * While associated (!DISASSOCIATED), all workers are bound to the * CPU and none has %WORKER_UNBOUND set and concurrency management * is in effect. * * While DISASSOCIATED, the cpu may be offline and all workers have * %WORKER_UNBOUND set and concurrency management disabled, and may * be executing on any CPU. The pool behaves as an unbound one. 
* * Note that DISASSOCIATED should be flipped only while holding * manager_mutex to avoid changing binding state while * create_worker() is in progress. */ POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ POOL_FREEZING = 1 << 3, /* freeze in progress */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ WORKER_DIE = 1 << 1, /* die die die */ WORKER_IDLE = 1 << 2, /* is idle */ WORKER_PREP = 1 << 3, /* preparing to run works */ WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ WORKER_UNBOUND = 1 << 7, /* worker is unbound */ WORKER_REBOUND = 1 << 8, /* worker was rebound */ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND | WORKER_REBOUND, NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */ BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, /* call for help after 10ms (min two ticks) */ MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ CREATE_COOLDOWN = HZ, /* time to breath after fail */ /* * Rescue workers are used only on emergencies and shared by * all cpus. Give -20. */ RESCUER_NICE_LEVEL = -20, HIGHPRI_NICE_LEVEL = -20, WQ_NAME_LEN = 24, }; /* * Structure fields follow one of the following exclusion rules. * * I: Modifiable by initialization/destruction paths and read-only for * everyone else. * * P: Preemption protected. Disabling preemption is enough and should * only be modified and accessed from the local cpu. * * L: pool->lock protected. Access with pool->lock held. * * X: During normal operation, modification requires pool->lock and should * be done only from local cpu. Either disabling preemption on local * cpu or grabbing pool->lock is enough for read access. If * POOL_DISASSOCIATED is set, it's identical to L. 
 *
 * MG: pool->manager_mutex and pool->lock protected.  Writes require both
 *     locks.  Reads can happen under either lock.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */

	/* nr_idle includes the ones off idle_list for rebinding */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	/* see manage_workers() for details on the two manager mutexes */
	struct mutex		manager_arb;	/* manager arbitration */
	struct mutex		manager_mutex;	/* manager exclusion */
	struct idr		worker_idr;	/* MG: worker IDs and iteration */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * The current concurrency level.  As it's likely to be accessed
	 * from other CPUs during try_to_wake_up(), put it in a separate
	 * cacheline.
	 */
	atomic_t		nr_running ____cacheline_aligned_in_smp;

	/*
	 * Destruction of pool is sched-RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	/*
	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
	 * itself is also sched-RCU protected so that the first pwq can be
	 * determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PL: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* WQ: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* WQ: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static int wq_numa_tbl_len;		/* highest possible NUMA node id + 1 */
static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */

static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
				     cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);

static int worker_thread(void *__worker);
static void copy_workqueue_attrs(struct workqueue_attrs *to,
				 const struct workqueue_attrs *from);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
			   lockdep_is_held(&wq_pool_mutex),		\
			   "sched RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex(wq)					\
	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
			   lockdep_is_held(&wq->mutex),			\
			   "sched RCU or wq->mutex should be held")

#ifdef CONFIG_LOCKDEP
#define assert_manager_or_pool_lock(pool)				\
	WARN_ONCE(debug_locks &&					\
		  !lockdep_is_held(&(pool)->manager_mutex) &&		\
		  !lockdep_is_held(&(pool)->lock),			\
		  "pool->manager_mutex or ->lock should be held")
#else
#define assert_manager_or_pool_lock(pool)	do { } while (0)
#endif

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or sched RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @wi: integer used for iteration
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with either @pool->manager_mutex or ->lock held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, wi, pool)				\
	idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))		\
		if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or sched RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
		else

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/* return a hint (the work function) identifying the object for debugobjects */
static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* allocate ID and assign it to @pool */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called either with wq->mutex held or sched RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
						  int node)
{
	assert_rcu_or_wq_mutex(wq);
	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

/* encode a work color into WORK_STRUCT flag bits */
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

/* extract the work color from a work item's data word */
static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

/* advance to the next flush color, wrapping at WORK_NR_COLORS */
static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Return the worker_pool @work was last associated with.  %NULL if none.
 *
 * Pools are created and destroyed under wq_pool_mutex, and allows read
 * access under sched-RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or with preemption disabled.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return the worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(&pool->nr_running);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) &&
		atomic_read(&pool->nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct worker_pool *pool)
{
	return need_to_create_worker(pool) ||
		(pool->flags & POOL_MANAGE_WORKERS);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = mutex_is_locked(&pool->manager_arb);
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	/*
	 * nr_idle and idle_list may disagree if idle rebinding is in
	 * progress.  Never return %true if idle_list is empty.
	 */
	if (list_empty(&pool->idle_list))
		return false;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
	struct worker *worker = first_worker(pool);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING)) {
		WARN_ON_ONCE(worker->pool->cpu != cpu);
		atomic_inc(&worker->pool->nr_running);
	}
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  Worker on the same cpu can be woken up by
 * returning pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	pool = worker->pool;

	/* this can only happen on the local cpu */
	if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
		return NULL;

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that we're bound to and
	 * running on the local cpu w/ rq lock held and preemption
	 * disabled, which in turn means that no one else could be
	 * manipulating idle_list, so dereferencing idle_list without pool
	 * lock is safe.
	 */
	if (atomic_dec_and_test(&pool->nr_running) &&
	    !list_empty(&pool->worklist))
		to_wakeup = first_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct worker_pool *pool = worker->pool;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		if (wakeup) {
			if (atomic_dec_and_test(&pool->nr_running) &&
			    !list_empty(&pool->worklist))
				wake_up_worker(pool);
		} else
			atomic_dec(&pool->nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(&pool->nr_running);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq.  The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	WARN_ON_ONCE(pwq->refcnt <= 0);
	pwq->refcnt++;
}

/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
 * destruction.  The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	if (likely(--pwq->refcnt))
		return;
	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
		return;
	/*
	 * @pwq can't be released under pool->lock, bounce to
	 * pwq_unbound_release_workfn().  This never recurses on the same
	 * pool->lock as this path is taken only for unbound workqueues and
	 * the release work item is scheduled on a per-cpu workqueue.  To
	 * avoid lockdep warning, unbound pool->locks are given lockdep
	 * subclass of 1 in get_unbound_pool().
	 */
	schedule_work(&pwq->unbound_release_work);
}

/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking.  This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
	if (pwq) {
		/*
		 * As both pwqs and pools are sched-RCU protected, the
		 * following lock operations are safe.
		 */
		spin_lock_irq(&pwq->pool->lock);
		put_pwq(pwq);
		spin_unlock_irq(&pwq->pool->lock);
	}
}

/* move a delayed work item to the pool worklist and account it as active */
static void pwq_activate_delayed_work(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);

	trace_workqueue_activate_work(work);
	move_linked_works(work, &pwq->pool->worklist, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	pwq->nr_active++;
}

/* activate the oldest delayed work item of @pwq */
static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
	struct work_struct *work = list_first_entry(&pwq->delayed_works,
						    struct work_struct, entry);

	pwq_activate_delayed_work(work);
}

/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
	/* uncolored work items don't participate in flushing or nr_active */
	if (color == WORK_NO_COLOR)
		goto out_put;

	pwq->nr_in_flight[color]--;

	pwq->nr_active--;
	if (!list_empty(&pwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (pwq->nr_active < pwq->max_active)
			pwq_activate_first_delayed(pwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(pwq->flush_color != color))
		goto out_put;

	/* are there still in-flight works? */
	if (pwq->nr_in_flight[color])
		goto out_put;

	/* this pwq is done, clear flush_color */
	pwq->flush_color = -1;

	/*
	 * If this was the last pwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
		complete(&pwq->wq->first_flusher->done);
out_put:
	put_pwq(pwq);
}

/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.  Return values are
 *
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe.  If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	pool = get_work_pool(work);
	if (!pool)
		goto fail;

	spin_lock(&pool->lock);
	/*
	 * work->data is guaranteed to point to pwq only while the work
	 * item is queued on pwq->wq, and both updating work->data to point
	 * to pwq on queueing and to pool on dequeueing are done under
	 * pwq->pool->lock.  This in turn guarantees that, if work->data
	 * points to pwq which is associated with a locked pool, the work
	 * item is currently queued on that pool.
	 */
	pwq = get_work_pwq(work);
	if (pwq && pwq->pool == pool) {
		debug_work_deactivate(work);

		/*
		 * A delayed work item cannot be grabbed directly because
		 * it might have linked NO_COLOR work items which, if left
		 * on the delayed_list, will confuse pwq->nr_active
		 * management later on and cause stall.  Make sure the work
		 * item is activated before grabbing.
		 */
		if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
			pwq_activate_delayed_work(work);

		list_del_init(&work->entry);
		pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));

		/* work->data points to pwq iff queued, point to pool */
		set_work_pool_and_keep_pending(work, pool->id);

		spin_unlock(&pool->lock);
		return 1;
	}
	spin_unlock(&pool->lock);
fail:
	local_irq_restore(*flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
	return -EAGAIN;
}

/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	struct worker_pool *pool = pwq->pool;

	/* we own @work, set data and link */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);
	get_pwq(pwq);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers lying
	 * around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(pool))
		wake_up_worker(pool);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	struct worker *worker;

	worker = current_wq_worker();
	/*
	 * Return %true iff I'm a worker executing a work item on @wq.  If
	 * I'm @worker, it's safe to dereference it without locking.
	 */
	return worker && worker->current_pwq->wq == wq;
}

static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *last_pool;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING.  Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	debug_work_activate(work);

	/* if dying, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & __WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;
retry:
	if (req_cpu == WORK_CPU_UNBOUND)
		cpu = raw_smp_processor_id();

	/* pwq which will be used unless @work is executing elsewhere */
	if (!(wq->flags & WQ_UNBOUND))
		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	else
		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));

	/*
	 * If @work was previously on a different pool, it might still be
	 * running there, in which case the work needs to be queued on that
	 * pool to guarantee non-reentrancy.
	 */
	last_pool = get_work_pool(work);
	if (last_pool && last_pool != pwq->pool) {
		struct worker *worker;

		spin_lock(&last_pool->lock);

		worker = find_worker_executing_work(last_pool, work);

		if (worker && worker->current_pwq->wq == wq) {
			pwq = worker->current_pwq;
		} else {
			/* meh... not running there, queue here */
			spin_unlock(&last_pool->lock);
			spin_lock(&pwq->pool->lock);
		}
	} else {
		spin_lock(&pwq->pool->lock);
	}

	/*
	 * pwq is determined and locked.  For unbound pools, we could have
	 * raced with pwq release and it could already be dead.  If its
	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
	 * without another pwq replacing it in the numa_pwq_tbl or while
	 * work items are executing on it, so the retrying is guaranteed to
	 * make forward-progress.
	 */
	if (unlikely(!pwq->refcnt)) {
		if (wq->flags & WQ_UNBOUND) {
			spin_unlock(&pwq->pool->lock);
			cpu_relax();
			goto retry;
		}
		/* oops */
		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
			  wq->name, cpu);
	}

	/* pwq determined, queue */
	trace_workqueue_queue_work(req_cpu, pwq, work);

	if (WARN_ON(!list_empty(&work->entry))) {
		spin_unlock(&pwq->pool->lock);
		return;
	}

	pwq->nr_in_flight[pwq->work_color]++;
	work_flags = work_color_to_flags(pwq->work_color);

	if (likely(pwq->nr_active < pwq->max_active)) {
		trace_workqueue_activate_work(work);
		pwq->nr_active++;
		worklist = &pwq->pool->worklist;
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &pwq->delayed_works;
	}

	insert_work(pwq, work, worklist, work_flags);

	spin_unlock(&pwq->pool->lock);
}

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	/* PENDING must be grabbed and queueing done with IRQs off */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_work_on);

/* timer callback for delayed works; queues the work when the delay expires */
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;

	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
}
EXPORT_SYMBOL(delayed_work_timer_fn);

/*
 * Arm @dwork's timer to fire after @delay, or queue immediately when
 * @delay is zero.  Caller owns PENDING and has IRQs disabled.
 */
static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);
	WARN_ON_ONCE(timer_pending(timer));
	WARN_ON_ONCE(!list_empty(&work->entry));

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		__queue_work(cpu, wq, &dwork->work);
		return;
	}

	timer_stats_timer_set_start_info(&dwork->timer);

	dwork->wq = wq;
	dwork->cpu = cpu;
	timer->expires = jiffies + delay;

	if (unlikely(cpu != WORK_CPU_UNBOUND))
		add_timer_on(timer, cpu);
	else
		add_timer(timer);
}

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns %false if @work was already on a queue, %true otherwise.  If
 * @delay is zero and @dwork is idle, it will be scheduled for immediate
 * execution.
 */
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret = false;
	unsigned long flags;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);

/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay.  If @delay is
 * zero, @work is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Returns %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context including IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	/* -EAGAIN means someone else is racing for PENDING; spin until settled */
	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
			 (worker->hentry.next || worker->hentry.pprev)))
		return;

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/*
	 * Sanity check nr_running.  Because wq_unbind_fn() releases
	 * pool->lock between setting %WORKER_UNBOUND and zapping
	 * nr_running, the warning may trigger spuriously.  Check iff
	 * unbind is not in progress.
	 */
	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
		     pool->nr_workers == pool->nr_idle &&
		     atomic_read(&pool->nr_running));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
 * @pool: target worker_pool
 *
 * Bind %current to the cpu of @pool if it is associated and lock @pool.
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by unbound workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.
 * kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and pool may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks pool and verifies the
 * binding against %POOL_DISASSOCIATED which is set during
 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
 * enters idle state or fetches works without dropping lock, it can
 * guarantee the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with pool->lock
 * held.
 *
 * RETURNS:
 * %true if the associated pool is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
__acquires(&pool->lock)
{
	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against POOL_DISASSOCIATED.
		 */
		if (!(pool->flags & POOL_DISASSOCIATED))
			set_cpus_allowed_ptr(current, pool->attrs->cpumask);

		spin_lock_irq(&pool->lock);
		if (pool->flags & POOL_DISASSOCIATED)
			return false;
		if (task_cpu(current) == pool->cpu &&
		    cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
			return true;
		spin_unlock_irq(&pool->lock);

		/*
		 * We've raced with CPU hot[un]plug.  Give it a breather
		 * and retry migration.  cond_resched() is required here;
		 * otherwise, we might deadlock against cpu_stop trying to
		 * bring down the CPU on non-preemptive kernel.
		 */
		cpu_relax();
		cond_resched();
	}
}

/* allocate and minimally initialize a worker; returns NULL on OOM */
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 *
 * Create a new worker which is bound to @pool.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker = NULL;
	int id = -1;
	char id_buf[16];

	lockdep_assert_held(&pool->manager_mutex);

	/*
	 * ID is needed to determine kthread name.  Allocate ID first
	 * without installing the pointer.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_irq(&pool->lock);

	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);

	spin_unlock_irq(&pool->lock);
	idr_preload_end();
	if (id < 0)
		goto fail;

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->pool = pool;
	worker->id = id;

	if (pool->cpu >= 0)
		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
			 pool->attrs->nice < 0  ? "H" : "");
	else
		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);

	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
					      "kworker/%s", id_buf);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
	 */
	set_user_nice(worker->task, pool->attrs->nice);
	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);

	/* prevent userland from meddling with cpumask of workqueue workers */
	worker->task->flags |= PF_NO_SETAFFINITY;

	/*
	 * The caller is responsible for ensuring %POOL_DISASSOCIATED
	 * remains stable across this function.  See the comments above the
	 * flag definition for details.
	 */
	if (pool->flags & POOL_DISASSOCIATED)
		worker->flags |= WORKER_UNBOUND;

	/* successful, commit the pointer to idr */
	spin_lock_irq(&pool->lock);
	idr_replace(&pool->worker_idr, worker, worker->id);
	spin_unlock_irq(&pool->lock);

	return worker;

fail:
	if (id >= 0) {
		spin_lock_irq(&pool->lock);
		idr_remove(&pool->worker_idr, id);
		spin_unlock_irq(&pool->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the pool aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}

/**
 * create_and_start_worker - create and start a worker for a pool
 * @pool: the target pool
 *
 * Grab the managership of @pool and create and start a new worker for it.
 */
static int create_and_start_worker(struct worker_pool *pool)
{
	struct worker *worker;

	mutex_lock(&pool->manager_mutex);

	worker = create_worker(pool);
	if (worker) {
		spin_lock_irq(&pool->lock);
		start_worker(worker);
		spin_unlock_irq(&pool->lock);
	}

	mutex_unlock(&pool->manager_mutex);

	return worker ? 0 : -ENOMEM;
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @pool stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->manager_mutex);
	lockdep_assert_held(&pool->lock);

	/* sanity check frenzy */
	if (WARN_ON(worker->current_work) ||
	    WARN_ON(!list_empty(&worker->scheduled)))
		return;

	if (worker->flags & WORKER_STARTED)
		pool->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		pool->nr_idle--;

	/*
	 * Once WORKER_DIE is set, the kworker may destroy itself at any
	 * point.  Pin to ensure the task stays until we're done with it.
	 */
	get_task_struct(worker->task);

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	idr_remove(&pool->worker_idr, worker->id);

	spin_unlock_irq(&pool->lock);

	kthread_stop(worker->task);
	put_task_struct(worker->task);
	kfree(worker);

	spin_lock_irq(&pool->lock);
}

/* idle timer callback: ask the manager to trim workers idle for too long */
static void idle_worker_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;

	spin_lock_irq(&pool->lock);

	if (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires))
			mod_timer(&pool->idle_timer, expires);
		else {
			/* it's been idle for too long, wake up manager */
			pool->flags |= POOL_MANAGE_WORKERS;
			wake_up_worker(pool);
		}
	}

	spin_unlock_irq(&pool->lock);
}

/* queue @work's pwq on its wq's mayday list and kick the rescuer */
static void send_mayday(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq_mayday_lock);

	if (!wq->rescuer)
		return;

	/* mayday mayday mayday */
	if (list_empty(&pwq->mayday_node)) {
		/*
		 * If @pwq is for an unbound wq, its base ref may be put at
		 * any time due to an attribute change.  Pin @pwq until the
		 * rescuer is done with it.
		 */
		get_pwq(pwq);
		list_add_tail(&pwq->mayday_node, &wq->maydays);
		wake_up_process(wq->rescuer->task);
	}
}

/* mayday timer callback: summon rescuers if worker creation is stuck */
static void pool_mayday_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;
	struct work_struct *work;

	spin_lock_irq(&wq_mayday_lock);		/* for wq->maydays */
	spin_lock(&pool->lock);

	if (need_to_create_worker(pool)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful.  We might be hitting an
		 * allocation deadlock.  Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &pool->worklist, entry)
			send_mayday(work);
	}

	spin_unlock(&pool->lock);
	spin_unlock_irq(&wq_mayday_lock);

	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}

/**
 * maybe_create_worker - create a new worker if necessary
 * @pool: pool to create a new worker for
 *
 * Create a new worker for @pool if necessary.  @pool is guaranteed to
 * have at least one idle worker on return from this function.  If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @pool to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be %false and
 * may_start_working() %true.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.  Called only from
 * manager.
 *
 * RETURNS:
 * %false if no action was taken and pool->lock stayed locked, %true
 * otherwise.
 */
static bool maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
	if (!need_to_create_worker(pool))
		return false;
restart:
	spin_unlock_irq(&pool->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		struct worker *worker;

		worker = create_worker(pool);
		if (worker) {
			del_timer_sync(&pool->mayday_timer);
			spin_lock_irq(&pool->lock);
			start_worker(worker);
			if (WARN_ON_ONCE(need_to_create_worker(pool)))
				goto restart;
			return true;
		}

		if (!need_to_create_worker(pool))
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(CREATE_COOLDOWN);

		if (!need_to_create_worker(pool))
			break;
	}

	del_timer_sync(&pool->mayday_timer);
	spin_lock_irq(&pool->lock);
	if (need_to_create_worker(pool))
		goto restart;
	return true;
}

/**
 * maybe_destroy_workers - destroy workers which have been idle for a while
 * @pool: pool to destroy workers for
 *
 * Destroy @pool workers which have been idle for longer than
 * IDLE_WORKER_TIMEOUT.
 *
 * LOCKING:
 * spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times.  Called only from manager.
 *
 * RETURNS:
 * %false if no action was taken and pool->lock stayed locked, %true
 * otherwise.
 */
static bool maybe_destroy_workers(struct worker_pool *pool)
{
	bool ret = false;

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is LIFO; the oldest idle worker is at the tail */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
		ret = true;
	}

	return ret;
}

/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage the worker pool @worker belongs
 * to.  At any given time, there can be only zero or one manager per
 * pool.  The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return.  On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * %false if the pool doesn't need management and the caller can safely
 * start processing works, %true if management was performed and
 * pool->lock may have been dropped and regrabbed in the meantime.
 */
static bool manage_workers(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	bool ret = false;

	/*
	 * Managership is governed by two mutexes - manager_arb and
	 * manager_mutex.  manager_arb handles arbitration of manager role.
	 * Anyone who successfully grabs manager_arb wins the arbitration
	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
	 * failure while holding pool->lock reliably indicates that someone
	 * else is managing the pool and the worker which failed trylock
	 * can proceed to executing work items.  This means that anyone
	 * grabbing manager_arb is responsible for actually performing
	 * manager duties.  If manager_arb is grabbed and released without
	 * actual management, the pool may stall indefinitely.
	 *
	 * manager_mutex is used for exclusion of actual management
	 * operations.  The holder of manager_mutex can be sure that none
	 * of management operations, including creation and destruction of
	 * workers, won't take place until the mutex is released.  Because
	 * manager_mutex doesn't interfere with manager role arbitration,
	 * it is guaranteed that the pool's management, while may be
	 * delayed, won't be disturbed by someone else grabbing
	 * manager_mutex.
	 */
	if (!mutex_trylock(&pool->manager_arb))
		return ret;

	/*
	 * With manager arbitration won, manager_mutex would be free in
	 * most cases.  trylock first without dropping @pool->lock.
	 */
	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
		spin_unlock_irq(&pool->lock);
		mutex_lock(&pool->manager_mutex);
		spin_lock_irq(&pool->lock);
		ret = true;
	}

	pool->flags &= ~POOL_MANAGE_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(pool);
	ret |= maybe_create_worker(pool);

	mutex_unlock(&pool->manager_mutex);
	mutex_unlock(&pool->manager_arb);
	return ret;
}

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logics necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct worker_pool *pool = worker->pool;
	bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
	/*
	 * Ensure we're on the correct CPU.  DISASSOCIATED test is
	 * necessary to avoid spurious warnings from rescuers servicing the
	 * unbound or a disassociated pool.
	 */
	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
		     !(pool->flags & POOL_DISASSOCIATED) &&
		     raw_smp_processor_id() != pool->cpu);

	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu.  Check whether anyone is
	 * already processing the work.  If so, defer the work to the
	 * currently executing one.
	 */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and dequeue */
	debug_work_deactivate(work);
	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
	worker->current_work = work;
	worker->current_func = work->func;
	worker->current_pwq = pwq;
	work_color = get_work_color(work);

	list_del_init(&work->entry);

	/*
	 * CPU intensive works don't participate in concurrency
	 * management.  They're the scheduler's responsibility.
	 */
	if (unlikely(cpu_intensive))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

	/*
	 * Unbound pool isn't concurrency managed and work items should be
	 * executed ASAP.  Wake up another worker if necessary.
	 */
	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
		wake_up_worker(pool);

	/*
	 * Record the last pool and clear PENDING which should be the last
	 * update to @work.  Also, do this inside @pool->lock so that
	 * PENDING and queued state changes happen together while IRQ is
	 * disabled.
	 */
	set_work_pool_and_clear_pending(work, pool->id);

	spin_unlock_irq(&pool->lock);

	lock_map_acquire_read(&pwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	trace_workqueue_execute_start(work);
	worker->current_func(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&pwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
		       "     last function: %pf\n",
		       current->comm, preempt_count(), task_pid_nr(current),
		       worker->current_func);
		debug_show_held_locks(current);
		/*
		 * NOTE(review): BUG_ON(PANIC_CORRUPTION) is a vendor addition
		 * not present upstream; presumably PANIC_CORRUPTION selects
		 * panic-on-corruption behavior for this tree -- confirm.
		 */
		BUG_ON(PANIC_CORRUPTION);
		dump_stack();
	}

	/*
	 * The following prevents a kworker from hogging CPU on !PREEMPT
	 * kernels, where a requeueing work item waiting for something to
	 * happen could deadlock with stop_machine as such work item could
	 * indefinitely requeue itself while all other CPUs are trapped in
	 * stop_machine.
	 */
	cond_resched();

	spin_lock_irq(&pool->lock);

	/* clear cpu intensive status */
	if (unlikely(cpu_intensive))
		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* we're done with it, release */
	hash_del(&worker->hentry);
	worker->current_work = NULL;
	worker->current_func = NULL;
	worker->current_pwq = NULL;
	worker->desc_valid = false;
	pwq_dec_nr_in_flight(pwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The worker thread function.  All workers belong to a worker_pool -
 * either a per-cpu one or dynamic unbound one.  These workers process all
 * work items regardless of their specific target workqueue.  The only
 * exception is work items which belong to workqueues with a rescuer which
 * will be explained in rescuer_thread().
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct worker_pool *pool = worker->pool;

	/* tell the scheduler that this is a workqueue worker */
	worker->task->flags |= PF_WQ_WORKER;
woke_up:
	spin_lock_irq(&pool->lock);

	/* am I supposed to die? */
	if (unlikely(worker->flags & WORKER_DIE)) {
		spin_unlock_irq(&pool->lock);
		WARN_ON_ONCE(!list_empty(&worker->entry));
		worker->task->flags &= ~PF_WQ_WORKER;
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(pool))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	WARN_ON_ONCE(!list_empty(&worker->scheduled));

	/*
	 * Finish PREP stage.  We're guaranteed to have at least one idle
	 * worker or that someone else has already assumed the manager
	 * role.  This is where @worker starts participating in concurrency
	 * management if applicable and concurrency management is restored
	 * after being rebound.  See rebind_workers() for details.
	 */
	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);

	do {
		struct work_struct *work =
			list_first_entry(&pool->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			/* linked works go through ->scheduled as a batch */
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	} while (keep_working(pool));

	worker_set_flags(worker, WORKER_PREP, false);
sleep:
	if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * pool->lock is held and there's no work to process and no need to
	 * manage, sleep.  Workers are woken up only while holding
	 * pool->lock or from local cpu, so setting the current state
	 * before releasing pool->lock is enough to prevent losing any
	 * event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&pool->lock);
	schedule();
	goto woke_up;
}

/**
 * rescuer_thread - the rescuer thread function
 * @__rescuer: self
 *
 * Workqueue rescuer thread function.  There's one rescuer for each
 * workqueue which has WQ_MEM_RECLAIM set.
 *
 * Regular work processing on a pool may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
 * the problem rescuer solves.
 *
 * When such condition is possible, the pool summons rescuers of all
 * workqueues which have works queued on the pool and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 */
static int rescuer_thread(void *__rescuer)
{
	struct worker *rescuer = __rescuer;
	struct workqueue_struct *wq = rescuer->rescue_wq;
	struct list_head *scheduled = &rescuer->scheduled;
	bool should_stop;

	set_user_nice(current, RESCUER_NICE_LEVEL);

	/*
	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
	 * doesn't participate in concurrency management.
	 */
	rescuer->task->flags |= PF_WQ_WORKER;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);

	/*
	 * By the time the rescuer is requested to stop, the workqueue
	 * shouldn't have any work pending, but @wq->maydays may still have
	 * pwq(s) queued.  This can happen by non-rescuer workers consuming
	 * all the work items before the rescuer got to them.  Go through
	 * @wq->maydays processing before acting on should_stop so that the
	 * list is always empty on exit.
	 */
	should_stop = kthread_should_stop();

	/* see whether any pwq is asking for help */
	spin_lock_irq(&wq_mayday_lock);

	while (!list_empty(&wq->maydays)) {
		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
					struct pool_workqueue, mayday_node);
		struct worker_pool *pool = pwq->pool;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		list_del_init(&pwq->mayday_node);

		spin_unlock_irq(&wq_mayday_lock);

		/* migrate to the target cpu if possible */
		worker_maybe_bind_and_lock(pool);
		rescuer->pool = pool;

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &pool->worklist, entry)
			if (get_work_pwq(work) == pwq)
				move_linked_works(work, scheduled, &n);

		process_scheduled_works(rescuer);

		/*
		 * Put the reference grabbed by send_mayday().  @pool won't
		 * go away while we're holding its lock.
		 */
		put_pwq(pwq);

		/*
		 * Leave this pool.  If keep_working() is %true, notify a
		 * regular worker; otherwise, we end up with 0 concurrency
		 * and stalling the execution.
		 */
		if (keep_working(pool))
			wake_up_worker(pool);

		rescuer->pool = NULL;
		spin_unlock(&pool->lock);
		spin_lock(&wq_mayday_lock);
	}

	spin_unlock_irq(&wq_mayday_lock);

	if (should_stop) {
		__set_current_state(TASK_RUNNING);
		rescuer->task->flags &= ~PF_WQ_WORKER;
		return 0;
	}

	/* rescuers should never participate in concurrency management */
	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
	schedule();
	goto repeat;
}

/* on-stack barrier work used by flush_work() and friends */
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

/* barrier work function: simply signal the flusher waiting on ->done */
static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @pwq: pwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine pwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void insert_wq_barrier(struct pool_workqueue *pwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with pool->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(pwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}

/**
 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare pwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all pwqs should be
 * -1.  If no pwq has in-flight commands at the specified color, all
 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
 * has in flight commands, its pwq->flush_color is set to
 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all pwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	struct pool_workqueue *pwq;

	if (flush_color >= 0) {
		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
		/* count starts at 1 so the final dec below pairs with it */
		atomic_set(&wq->nr_pwqs_to_flush, 1);
	}

	for_each_pwq(pwq, wq) {
		struct worker_pool *pool = pwq->pool;

		spin_lock_irq(&pool->lock);

		if (flush_color >= 0) {
			WARN_ON_ONCE(pwq->flush_color != -1);

			if (pwq->nr_in_flight[flush_color]) {
				pwq->flush_color = flush_color;
				atomic_inc(&wq->nr_pwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
			pwq->work_color = work_color;
		}

		spin_unlock_irq(&pool->lock);
	}

	/* drop the initial ref; completes immediately if nothing was in flight */
	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * This function sleeps until all work items which were queued on entry
 * have finished execution, but it is not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	WARN_ON_ONCE(!list_empty(&this_flusher.list));
	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
			     wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			WARN_ON_ONCE(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm pwqs.
		 */
		WARN_ON_ONCE(wq->flush_color == wq->work_color);
		WARN_ON_ONCE(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * drain_workqueue - drain a workqueue
 * @wq: workqueue to drain
 *
 * Wait until the workqueue becomes empty.  While draining is in progress,
 * only chain queueing is allowed.  IOW, only currently pending or running
 * work items on @wq can queue further work items on it.  @wq is flushed
 * repeatedly until it becomes empty.  The number of flushing is determined
 * by the depth of chaining and should be relatively short.  Whine if it
 * takes too long.
 */
void drain_workqueue(struct workqueue_struct *wq)
{
	unsigned int flush_cnt = 0;
	struct pool_workqueue *pwq;

	/*
	 * __queue_work() needs to test whether there are drainers, is much
	 * hotter than drain_workqueue() and already looks at @wq->flags.
	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
	 */
	mutex_lock(&wq->mutex);
	if (!wq->nr_drainers++)
		wq->flags |= __WQ_DRAINING;
	mutex_unlock(&wq->mutex);
reflush:
	flush_workqueue(wq);

	mutex_lock(&wq->mutex);

	for_each_pwq(pwq, wq) {
		bool drained;

		spin_lock_irq(&pwq->pool->lock);
		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
		spin_unlock_irq(&pwq->pool->lock);

		if (drained)
			continue;

		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
				wq->name, flush_cnt);

		mutex_unlock(&wq->mutex);
		goto reflush;
	}

	if (!--wq->nr_drainers)
		wq->flags &= ~__WQ_DRAINING;
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(drain_workqueue);

/*
 * Queue @barr behind the last queueing instance of @work, or behind the
 * worker currently executing it.  Returns %false if @work is idle (nothing
 * to wait for), %true if the barrier was inserted and should be waited on.
 */
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
{
	struct worker *worker = NULL;
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	might_sleep();

	local_irq_disable();
	pool = get_work_pool(work);
	if (!pool) {
		local_irq_enable();
		return false;
	}

	spin_lock(&pool->lock);
	/* see the comment in try_to_grab_pending() with the same code */
	pwq = get_work_pwq(work);
	if (pwq) {
		if (unlikely(pwq->pool != pool))
			goto already_gone;
	} else {
		worker = find_worker_executing_work(pool, work);
		if (!worker)
			goto already_gone;
		pwq = worker->current_pwq;
	}

	insert_wq_barrier(pwq, barr, work, worker);
	spin_unlock_irq(&pool->lock);

	/*
	 * If @max_active is 1 or rescuer is in use, flushing another work
	 * item on the same workqueue may lead to deadlock.  Make sure the
	 * flusher is not running on the same workqueue by verifying write
	 * access.
	 */
	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
		lock_map_acquire(&pwq->wq->lockdep_map);
	else
		lock_map_acquire_read(&pwq->wq->lockdep_map);
	lock_map_release(&pwq->wq->lockdep_map);

	return true;
already_gone:
	spin_unlock_irq(&pool->lock);
	return false;
}

/**
 * flush_work - wait for a work to finish executing the last queueing instance
 * @work: the work to flush
 *
 * Wait until @work has finished execution.  @work is guaranteed to be idle
 * on return if it hasn't been requeued since flush started.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work(struct work_struct *work)
{
	struct wq_barrier barr;

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	if (start_flush_work(work, &barr)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
		return true;
	} else {
		return false;
	}
}
EXPORT_SYMBOL_GPL(flush_work);

/* waitqueue entry used to wait for a specific @work's canceling to finish */
struct cwt_wait {
	wait_queue_t		wait;
	struct work_struct	*work;
};

/* wake function: only wake waiters whose @work matches the wakeup key */
static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);

	if (cwait->work != key)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
		/*
		 * If someone else is already canceling, wait for it to
		 * finish.
		 * flush_work() doesn't work for PREEMPT_NONE
		 * because we may get scheduled between @work's completion
		 * and the other canceling task resuming and clearing
		 * CANCELING - flush_work() will return false immediately
		 * as @work is no longer busy, try_to_grab_pending() will
		 * return -ENOENT as @work is still being canceled and the
		 * other canceling task won't be able to clear CANCELING as
		 * we're hogging the CPU.
		 *
		 * Let's wait for completion using a waitqueue.  As this
		 * may lead to the thundering herd problem, use a custom
		 * wake function which matches @work along with exclusive
		 * wait and wakeup.
		 */
		if (unlikely(ret == -ENOENT)) {
			struct cwt_wait cwait;

			init_wait(&cwait.wait);
			cwait.wait.func = cwt_wakefn;
			cwait.work = work;

			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
						  TASK_UNINTERRUPTIBLE);
			if (work_is_canceling(work))
				schedule();
			finish_wait(&cancel_waitq, &cwait.wait);
		}
	} while (unlikely(ret < 0));

	/* tell other tasks trying to grab @work to back off */
	mark_work_canceling(work);
	local_irq_restore(flags);

	flush_work(work);
	clear_work_data(work);

	/*
	 * Paired with prepare_to_wait() above so that either
	 * waitqueue_active() is visible here or !work_is_canceling() is
	 * visible there.
	 */
	smp_mb();
	if (waitqueue_active(&cancel_waitq))
		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);

	return ret;
}

/**
 * cancel_work_sync - cancel a work and wait for it to finish
 * @work: the work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself or migrates to
 * another workqueue.  On return from this function, @work is
 * guaranteed to be not pending or executing on any CPU.
 *
 * cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's.  Use cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the workqueue on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * RETURNS:
 * %true if @work was pending, %false otherwise.
 */
bool cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, false);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work(struct delayed_work *dwork)
{
	local_irq_disable();
	if (del_timer_sync(&dwork->timer))
		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
	local_irq_enable();
	return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * cancel_delayed_work - cancel a delayed work
 * @dwork: delayed_work to cancel
 *
 * Kill off a pending delayed_work.  Returns %true if @dwork was pending
 * and canceled; %false if wasn't pending.  Note that the work callback
 * function may still be running on return, unless it returns %true and the
 * work doesn't re-arm itself.  Explicitly flush or use
 * cancel_delayed_work_sync() to wait on it.
 *
 * This function is safe to call from any context including IRQ handler.
 */
bool cancel_delayed_work(struct delayed_work *dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))
		return false;

	set_work_pool_and_clear_pending(&dwork->work,
					get_work_pool_id(&dwork->work));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(cancel_delayed_work);

/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * RETURNS:
 * %true if @dwork was pending, %false otherwise.
 */
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * schedule_on_each_cpu() executes @func on each online CPU using the
 * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct __percpu *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

#ifdef CONFIG_SYSFS
/*
 * Workqueues with WQ_SYSFS flag set is visible to userland via
 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
 * following attributes.
 *
 *  per_cpu	RO bool	: whether the workqueue is per-cpu or unbound
 *  max_active	RW int	: maximum number of in-flight work items
 *
 * Unbound workqueues have the following extra attributes.
 *
 *  id		RO int	: the associated pool ID
 *  nice	RW int	: nice value of the workers
 *  cpumask	RW mask	: bitmask of allowed CPUs for the workers
 */
struct wq_device {
	struct workqueue_struct		*wq;
	struct device			dev;
};

/* map a sysfs device back to the workqueue it represents */
static struct workqueue_struct *dev_to_wq(struct device *dev)
{
	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);

	return wq_dev->wq;
}

/* sysfs: report whether @wq is per-cpu (1) or unbound (0) */
static ssize_t wq_per_cpu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
}

/* sysfs: report the configured max_active */
static ssize_t wq_max_active_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
}

/* sysfs: set max_active; only positive integers are accepted */
static ssize_t wq_max_active_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int val;

	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
		return -EINVAL;

	workqueue_set_max_active(wq, val);
	return count;
}

/* attributes visible on every WQ_SYSFS workqueue */
static struct device_attribute wq_sysfs_attrs[] = {
	__ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
	__ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
	__ATTR_NULL,
};

/* sysfs: list "node:pool_id" pairs for an unbound workqueue */
static ssize_t wq_pool_ids_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	const char *delim = "";
	int node, written = 0;

	rcu_read_lock_sched();
	for_each_node(node) {
		written += scnprintf(buf + written, PAGE_SIZE - written,
				     "%s%d:%d", delim, node,
				     unbound_pwq_by_node(wq, node)->pool->id);
		delim = " ";
	}
	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
	rcu_read_unlock_sched();

	return written;
}

/* sysfs: report the nice value of the workqueue's workers */
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int written;

	mutex_lock(&wq->mutex);
	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
	mutex_unlock(&wq->mutex);

	return written;
}

/* prepare workqueue_attrs for sysfs store operations */
static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return NULL;

	mutex_lock(&wq->mutex);
	copy_workqueue_attrs(attrs, wq->unbound_attrs);
	mutex_unlock(&wq->mutex);
	return attrs;
}

/* sysfs: set the nice value; valid range is [-20, 19] */
static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int ret;

	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		return -ENOMEM;

	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
	    attrs->nice >= -20 && attrs->nice <= 19)
		ret = apply_workqueue_attrs(wq, attrs);
	else
		ret = -EINVAL;

	free_workqueue_attrs(attrs);
	return ret ?: count;
}

/* sysfs: report the allowed cpumask of the workers */
static ssize_t wq_cpumask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int written;

	mutex_lock(&wq->mutex);
	written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask);
	mutex_unlock(&wq->mutex);

	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
	return written;
}

/* sysfs: set the allowed cpumask of the workers */
static ssize_t wq_cpumask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int ret;

	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		return -ENOMEM;

	ret = cpumask_parse(buf, attrs->cpumask);
	if (!ret)
		ret = apply_workqueue_attrs(wq, attrs);

	free_workqueue_attrs(attrs);
	return ret ?: count;
}

/* sysfs: report whether NUMA affinity is enabled (inverse of no_numa) */
static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	int written;

	mutex_lock(&wq->mutex);
	written = scnprintf(buf, PAGE_SIZE, "%d\n",
			    !wq->unbound_attrs->no_numa);
	mutex_unlock(&wq->mutex);

	return written;
}

/* sysfs: enable/disable NUMA affinity */
static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct workqueue_struct *wq = dev_to_wq(dev);
	struct workqueue_attrs *attrs;
	int v, ret;

	attrs = wq_sysfs_prep_attrs(wq);
	if (!attrs)
		return -ENOMEM;

	ret = -EINVAL;
	if (sscanf(buf, "%d", &v) == 1) {
		attrs->no_numa = !v;
		ret = apply_workqueue_attrs(wq, attrs);
	}

	free_workqueue_attrs(attrs);
	return ret ?: count;
}

/* extra attributes visible only on unbound workqueues */
static struct device_attribute wq_sysfs_unbound_attrs[] = {
	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
	__ATTR_NULL,
};

static struct bus_type wq_subsys = {
	.name				= "workqueue",
	.dev_attrs			= wq_sysfs_attrs,
};

static int __init wq_sysfs_init(void)
{
	return subsys_virtual_register(&wq_subsys, NULL);
}
core_initcall(wq_sysfs_init);

static void wq_device_release(struct device *dev)
{
	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);

	kfree(wq_dev);
}

/**
 * workqueue_sysfs_register - make a workqueue visible in sysfs
 * @wq: the workqueue to register
 *
 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
 * which is the preferred method.
 *
 * Workqueue user should use this function directly iff it wants to apply
 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
 * apply_workqueue_attrs() may race against userland updating the
 * attributes.
 *
 * Returns 0 on success, -errno on failure.
 */
int workqueue_sysfs_register(struct workqueue_struct *wq)
{
	struct wq_device *wq_dev;
	int ret;

	/*
	 * Adjusting max_active or creating new pwqs by applying
	 * attributes breaks ordering guarantee.  Disallow exposing ordered
	 * workqueues.
	 */
	if (WARN_ON(wq->flags & __WQ_ORDERED))
		return -EINVAL;

	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
	if (!wq_dev)
		return -ENOMEM;

	wq_dev->wq = wq;
	wq_dev->dev.bus = &wq_subsys;
	wq_dev->dev.init_name = wq->name;
	wq_dev->dev.release = wq_device_release;

	/*
	 * unbound_attrs are created separately.  Suppress uevent until
	 * everything is ready.
	 */
	dev_set_uevent_suppress(&wq_dev->dev, true);

	ret = device_register(&wq_dev->dev);
	if (ret) {
		kfree(wq_dev);
		wq->wq_dev = NULL;
		return ret;
	}

	if (wq->flags & WQ_UNBOUND) {
		struct device_attribute *attr;

		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
			ret = device_create_file(&wq_dev->dev, attr);
			if (ret) {
				device_unregister(&wq_dev->dev);
				wq->wq_dev = NULL;
				return ret;
			}
		}
	}

	dev_set_uevent_suppress(&wq_dev->dev, false);
	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
	return 0;
}

/**
 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
 * @wq: the workqueue to unregister
 *
 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
 */
static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
{
	struct wq_device *wq_dev = wq->wq_dev;

	if (!wq->wq_dev)
		return;

	wq->wq_dev = NULL;
	device_unregister(&wq_dev->dev);
}
#else	/* CONFIG_SYSFS */
static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
#endif	/* CONFIG_SYSFS */

/**
 * free_workqueue_attrs - free a workqueue_attrs
 * @attrs: workqueue_attrs to free
 *
 * Undo alloc_workqueue_attrs().
 */
void free_workqueue_attrs(struct workqueue_attrs *attrs)
{
	if (attrs) {
		free_cpumask_var(attrs->cpumask);
		kfree(attrs);
	}
}

/**
 * alloc_workqueue_attrs - allocate a workqueue_attrs
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new workqueue_attrs, initialize with default settings and
 * return it.  Returns NULL on failure.
 */
struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
{
	struct workqueue_attrs *attrs;

	attrs = kzalloc(sizeof(*attrs), gfp_mask);
	if (!attrs)
		goto fail;
	if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
		goto fail;

	cpumask_copy(attrs->cpumask, cpu_possible_mask);
	return attrs;
fail:
	free_workqueue_attrs(attrs);
	return NULL;
}

/* copy @from into @to, including the no_numa flag */
static void copy_workqueue_attrs(struct workqueue_attrs *to,
				 const struct workqueue_attrs *from)
{
	to->nice = from->nice;
	cpumask_copy(to->cpumask, from->cpumask);
	/*
	 * Unlike hash and equality test, this function doesn't ignore
	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
	 * get_unbound_pool() explicitly clears ->no_numa after copying.
	 */
	to->no_numa = from->no_numa;
}

/* hash value of the content of @attr */
static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
{
	u32 hash = 0;

	hash = jhash_1word(attrs->nice, hash);
	hash = jhash(cpumask_bits(attrs->cpumask),
		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
	return hash;
}

/* content equality test */
static bool wqattrs_equal(const struct workqueue_attrs *a,
			  const struct workqueue_attrs *b)
{
	if (a->nice != b->nice)
		return false;
	if (!cpumask_equal(a->cpumask, b->cpumask))
		return false;
	return true;
}

/**
 * init_worker_pool - initialize a newly zalloc'd worker_pool
 * @pool: worker_pool to initialize
 *
 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
 * Returns 0 on success, -errno on failure.  Even on failure, all fields
 * inside @pool proper are initialized and put_unbound_pool() can be called
 * on @pool safely to release it.
 */
static int init_worker_pool(struct worker_pool *pool)
{
	spin_lock_init(&pool->lock);
	pool->id = -1;
	pool->cpu = -1;
	pool->node = NUMA_NO_NODE;
	pool->flags |= POOL_DISASSOCIATED;
	INIT_LIST_HEAD(&pool->worklist);
	INIT_LIST_HEAD(&pool->idle_list);
	hash_init(pool->busy_hash);

	init_timer_deferrable(&pool->idle_timer);
	pool->idle_timer.function = idle_worker_timeout;
	pool->idle_timer.data = (unsigned long)pool;

	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
		    (unsigned long)pool);

	mutex_init(&pool->manager_arb);
	mutex_init(&pool->manager_mutex);
	idr_init(&pool->worker_idr);

	INIT_HLIST_NODE(&pool->hash_node);
	pool->refcnt = 1;

	/* shouldn't fail above this point */
	pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!pool->attrs)
		return -ENOMEM;
	return 0;
}

/* sched-RCU callback: final destruction of a worker_pool */
static void rcu_free_pool(struct rcu_head *rcu)
{
	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);

	idr_destroy(&pool->worker_idr);
	free_workqueue_attrs(pool->attrs);
	kfree(pool);
}

/**
 * put_unbound_pool - put a worker_pool
 * @pool: worker_pool to put
 *
 * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
 * safe manner.  get_unbound_pool() calls this function on its failure path
 * and this function should be able to release pools which went through,
 * successfully or not, init_worker_pool().
 *
 * Should be called with wq_pool_mutex held.
 */
static void put_unbound_pool(struct worker_pool *pool)
{
	struct worker *worker;

	lockdep_assert_held(&wq_pool_mutex);

	if (--pool->refcnt)
		return;

	/* sanity checks */
	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
	    WARN_ON(!list_empty(&pool->worklist)))
		return;

	/* release id and unhash */
	if (pool->id >= 0)
		idr_remove(&worker_pool_idr, pool->id);
	hash_del(&pool->hash_node);

	/*
	 * Become the manager and destroy all workers.  Grabbing
	 * manager_arb prevents @pool's workers from blocking on
	 * manager_mutex.
	 */
	mutex_lock(&pool->manager_arb);
	mutex_lock(&pool->manager_mutex);
	spin_lock_irq(&pool->lock);

	while ((worker = first_worker(pool)))
		destroy_worker(worker);
	WARN_ON(pool->nr_workers || pool->nr_idle);

	spin_unlock_irq(&pool->lock);
	mutex_unlock(&pool->manager_mutex);
	mutex_unlock(&pool->manager_arb);

	/* shut down the timers */
	del_timer_sync(&pool->idle_timer);
	del_timer_sync(&pool->mayday_timer);

	/* sched-RCU protected to allow dereferences from get_work_pool() */
	call_rcu_sched(&pool->rcu, rcu_free_pool);
}

/**
 * get_unbound_pool - get a worker_pool with the specified attributes
 * @attrs: the attributes of the worker_pool to get
 *
 * Obtain a worker_pool which has the same attributes as @attrs, bump the
 * reference count and return it.  If there already is a matching
 * worker_pool, it will be used; otherwise, this function attempts to
 * create a new one.  On failure, returns NULL.
 *
 * Should be called with wq_pool_mutex held.
 */
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
	u32 hash = wqattrs_hash(attrs);
	struct worker_pool *pool;
	int node;

	lockdep_assert_held(&wq_pool_mutex);

	/* do we already have a matching pool? */
	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
		if (wqattrs_equal(pool->attrs, attrs)) {
			pool->refcnt++;
			goto out_unlock;
		}
	}

	/* nope, create a new one */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool || init_worker_pool(pool) < 0)
		goto fail;

	if (workqueue_freezing)
		pool->flags |= POOL_FREEZING;

	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
	copy_workqueue_attrs(pool->attrs, attrs);

	/*
	 * no_numa isn't a worker_pool attribute, always clear it.  See
	 * 'struct workqueue_attrs' comments for detail.
	 */
	pool->attrs->no_numa = false;

	/* if cpumask is contained inside a NUMA node, we belong to that node */
	if (wq_numa_enabled) {
		for_each_node(node) {
			if (cpumask_subset(pool->attrs->cpumask,
					   wq_numa_possible_cpumask[node])) {
				pool->node = node;
				break;
			}
		}
	}

	if (worker_pool_assign_id(pool) < 0)
		goto fail;

	/* create and start the initial worker */
	if (create_and_start_worker(pool) < 0)
		goto fail;

	/* install */
	hash_add(unbound_pool_hash, &pool->hash_node, hash);
out_unlock:
	return pool;
fail:
	if (pool)
		put_unbound_pool(pool);
	return NULL;
}

/* sched-RCU callback: free a pool_workqueue back to its slab cache */
static void rcu_free_pwq(struct rcu_head *rcu)
{
	kmem_cache_free(pwq_cache,
			container_of(rcu, struct pool_workqueue, rcu));
}

/*
 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
 * and needs to be destroyed.
 */
static void pwq_unbound_release_workfn(struct work_struct *work)
{
	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
						  unbound_release_work);
	struct workqueue_struct *wq = pwq->wq;
	struct worker_pool *pool = pwq->pool;
	bool is_last;

	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
		return;

	/*
	 * Unlink @pwq.  Synchronization against wq->mutex isn't strictly
	 * necessary on release but do it anyway.  It's easier to verify
	 * and consistent with the linking path.
	 */
	mutex_lock(&wq->mutex);
	list_del_rcu(&pwq->pwqs_node);
	is_last = list_empty(&wq->pwqs);
	mutex_unlock(&wq->mutex);

	mutex_lock(&wq_pool_mutex);
	put_unbound_pool(pool);
	mutex_unlock(&wq_pool_mutex);

	call_rcu_sched(&pwq->rcu, rcu_free_pwq);

	/*
	 * If we're the last pwq going away, @wq is already dead and no one
	 * is gonna access it anymore.  Free it.
	 */
	if (is_last) {
		free_workqueue_attrs(wq->unbound_attrs);
		kfree(wq);
	}
}

/**
 * pwq_adjust_max_active - update a pwq's max_active to the current setting
 * @pwq: target pool_workqueue
 *
 * If @pwq isn't freezing, set @pwq->max_active to the associated
 * workqueue's saved_max_active and activate delayed work items
 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
 */
static void pwq_adjust_max_active(struct pool_workqueue *pwq)
{
	struct workqueue_struct *wq = pwq->wq;
	bool freezable = wq->flags & WQ_FREEZABLE;

	/* for @wq->saved_max_active */
	lockdep_assert_held(&wq->mutex);

	/* fast exit for non-freezable wqs */
	if (!freezable && pwq->max_active == wq->saved_max_active)
		return;

	spin_lock_irq(&pwq->pool->lock);

	if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
		pwq->max_active = wq->saved_max_active;

		while (!list_empty(&pwq->delayed_works) &&
		       pwq->nr_active < pwq->max_active)
			pwq_activate_first_delayed(pwq);

		/*
		 * Need to kick a worker after thawed or an unbound wq's
		 * max_active is bumped.  It's a slow path.  Do it always.
		 */
		wake_up_worker(pwq->pool);
	} else {
		pwq->max_active = 0;
	}

	spin_unlock_irq(&pwq->pool->lock);
}

/* initialize newly alloced @pwq which is associated with @wq and @pool */
static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
		     struct worker_pool *pool)
{
	/* pwq pointers are stored in work data bits; low bits must be free */
	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);

	memset(pwq, 0, sizeof(*pwq));

	pwq->pool = pool;
	pwq->wq = wq;
	pwq->flush_color = -1;
	pwq->refcnt = 1;
	INIT_LIST_HEAD(&pwq->delayed_works);
	INIT_LIST_HEAD(&pwq->pwqs_node);
	INIT_LIST_HEAD(&pwq->mayday_node);
	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
}

/* sync @pwq with the current state of its associated wq and link it */
static void link_pwq(struct pool_workqueue *pwq)
{
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq->mutex);

	/* may be called multiple times, ignore if already linked */
	if (!list_empty(&pwq->pwqs_node))
		return;

	/*
	 * Set the matching work_color.  This is synchronized with
	 * wq->mutex to avoid confusing flush_workqueue().
	 */
	pwq->work_color = wq->work_color;

	/* sync max_active to the current setting */
	pwq_adjust_max_active(pwq);

	/* link in @pwq */
	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
}

/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
					const struct workqueue_attrs *attrs)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	lockdep_assert_held(&wq_pool_mutex);

	pool = get_unbound_pool(attrs);
	if (!pool)
		return NULL;

	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
	if (!pwq) {
		put_unbound_pool(pool);
		return NULL;
	}

	init_pwq(pwq, wq, pool);
	return pwq;
}

/* undo alloc_unbound_pwq(), used only in the error path */
static void free_unbound_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&wq_pool_mutex);

	if (pwq) {
		put_unbound_pool(pwq->pool);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/**
 * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
 * @attrs: the wq_attrs of interest
 * @node: the target NUMA node
 * @cpu_going_down: if >= 0, the CPU to consider as offline
 * @cpumask: outarg, the resulting cpumask
 *
 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
 * @cpu_going_down is >= 0, that cpu is considered offline during
 * calculation.  The result is stored in @cpumask.  This function returns
 * %true if the resulting @cpumask is different from @attrs->cpumask,
 * %false if equal.
 *
 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
 * enabled and @node has online CPUs requested by @attrs, the returned
 * cpumask is the intersection of the possible CPUs of @node and
 * @attrs->cpumask.
 *
 * The caller is responsible for ensuring that the cpumask of @node stays
 * stable.
 */
static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
				 int cpu_going_down, cpumask_t *cpumask)
{
	if (!wq_numa_enabled || attrs->no_numa)
		goto use_dfl;

	/* does @node have any online CPUs @attrs wants? */
	cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
	if (cpu_going_down >= 0)
		cpumask_clear_cpu(cpu_going_down, cpumask);

	if (cpumask_empty(cpumask))
		goto use_dfl;

	/* yeap, return possible CPUs in @node that @attrs wants */
	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
	return !cpumask_equal(cpumask, attrs->cpumask);

use_dfl:
	cpumask_copy(cpumask, attrs->cpumask);
	return false;
}

/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
						   int node,
						   struct pool_workqueue *pwq)
{
	struct pool_workqueue *old_pwq;

	lockdep_assert_held(&wq->mutex);

	/* link_pwq() can handle duplicate calls */
	link_pwq(pwq);

	old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
	return old_pwq;
}

/**
 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
 * @wq: the target workqueue
 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
 *
 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
 * machines, this function maps a separate pwq to each NUMA node with
 * possible CPUs in @attrs->cpumask so that work items are affine to the
 * NUMA node it was issued on.  Older pwqs are released as in-flight work
 * items finish.  Note that a work item which repeatedly requeues itself
 * back-to-back will stay on its current pwq.
 *
 * Performs GFP_KERNEL allocations.  Returns 0 on success and -errno on
 * failure.
*/ int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) { struct workqueue_attrs *new_attrs, *tmp_attrs; struct pool_workqueue **pwq_tbl, *dfl_pwq; int node, ret; /* only unbound workqueues can change attributes */ if (WARN_ON(!(wq->flags & WQ_UNBOUND))) return -EINVAL; /* creating multiple pwqs breaks ordering guarantee */ if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) return -EINVAL; pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL); new_attrs = alloc_workqueue_attrs(GFP_KERNEL); tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); if (!pwq_tbl || !new_attrs || !tmp_attrs) goto enomem; /* make a copy of @attrs and sanitize it */ copy_workqueue_attrs(new_attrs, attrs); cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); /* * We may create multiple pwqs with differing cpumasks. Make a * copy of @new_attrs which will be modified and used to obtain * pools. */ copy_workqueue_attrs(tmp_attrs, new_attrs); /* * CPUs should stay stable across pwq creations and installations. * Pin CPUs, determine the target cpumask for each node and create * pwqs accordingly. */ get_online_cpus(); mutex_lock(&wq_pool_mutex); /* * If something goes wrong during CPU up/down, we'll fall back to * the default pwq covering whole @attrs->cpumask. Always create * it even if we don't use it immediately. 
*/ dfl_pwq = alloc_unbound_pwq(wq, new_attrs); if (!dfl_pwq) goto enomem_pwq; for_each_node(node) { if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) { pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); if (!pwq_tbl[node]) goto enomem_pwq; } else { dfl_pwq->refcnt++; pwq_tbl[node] = dfl_pwq; } } mutex_unlock(&wq_pool_mutex); /* all pwqs have been created successfully, let's install'em */ mutex_lock(&wq->mutex); copy_workqueue_attrs(wq->unbound_attrs, new_attrs); /* save the previous pwq and install the new one */ for_each_node(node) pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]); /* @dfl_pwq might not have been used, ensure it's linked */ link_pwq(dfl_pwq); swap(wq->dfl_pwq, dfl_pwq); mutex_unlock(&wq->mutex); /* put the old pwqs */ for_each_node(node) put_pwq_unlocked(pwq_tbl[node]); put_pwq_unlocked(dfl_pwq); put_online_cpus(); ret = 0; /* fall through */ out_free: free_workqueue_attrs(tmp_attrs); free_workqueue_attrs(new_attrs); kfree(pwq_tbl); return ret; enomem_pwq: free_unbound_pwq(dfl_pwq); for_each_node(node) if (pwq_tbl && pwq_tbl[node] != dfl_pwq) free_unbound_pwq(pwq_tbl[node]); mutex_unlock(&wq_pool_mutex); put_online_cpus(); enomem: ret = -ENOMEM; goto out_free; } /** * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug * @wq: the target workqueue * @cpu: the CPU coming up or going down * @online: whether @cpu is coming up or going down * * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of * @wq accordingly. * * If NUMA affinity can't be adjusted due to memory allocation failure, it * falls back to @wq->dfl_pwq which may not be optimal but is always * correct. 
* * Note that when the last allowed CPU of a NUMA node goes offline for a * workqueue with a cpumask spanning multiple nodes, the workers which were * already executing the work items for the workqueue will lose their CPU * affinity and may execute on any CPU. This is similar to how per-cpu * workqueues behave on CPU_DOWN. If a workqueue user wants strict * affinity, it's the user's responsibility to flush the work item from * CPU_DOWN_PREPARE. */ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, bool online) { int node = cpu_to_node(cpu); int cpu_off = online ? -1 : cpu; struct pool_workqueue *old_pwq = NULL, *pwq; struct workqueue_attrs *target_attrs; cpumask_t *cpumask; lockdep_assert_held(&wq_pool_mutex); if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND)) return; /* * We don't wanna alloc/free wq_attrs for each wq for each CPU. * Let's use a preallocated one. The following buf is protected by * CPU hotplug exclusion. */ target_attrs = wq_update_unbound_numa_attrs_buf; cpumask = target_attrs->cpumask; mutex_lock(&wq->mutex); if (wq->unbound_attrs->no_numa) goto out_unlock; copy_workqueue_attrs(target_attrs, wq->unbound_attrs); pwq = unbound_pwq_by_node(wq, node); /* * Let's determine what needs to be done. If the target cpumask is * different from wq's, we need to compare it to @pwq's and create * a new one if they don't match. If the target cpumask equals * wq's, the default pwq should be used. If @pwq is already the * default one, nothing to do; otherwise, install the default one. 
*/ if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) { if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) goto out_unlock; } else { if (pwq == wq->dfl_pwq) goto out_unlock; else goto use_dfl_pwq; } mutex_unlock(&wq->mutex); /* create a new pwq */ pwq = alloc_unbound_pwq(wq, target_attrs); if (!pwq) { pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", wq->name); mutex_lock(&wq->mutex); goto use_dfl_pwq; } /* * Install the new pwq. As this function is called only from CPU * hotplug callbacks and applying a new attrs is wrapped with * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed * inbetween. */ mutex_lock(&wq->mutex); old_pwq = numa_pwq_tbl_install(wq, node, pwq); goto out_unlock; use_dfl_pwq: spin_lock_irq(&wq->dfl_pwq->pool->lock); get_pwq(wq->dfl_pwq); spin_unlock_irq(&wq->dfl_pwq->pool->lock); old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); out_unlock: mutex_unlock(&wq->mutex); put_pwq_unlocked(old_pwq); } static int alloc_and_link_pwqs(struct workqueue_struct *wq) { bool highpri = wq->flags & WQ_HIGHPRI; int cpu, ret; if (!(wq->flags & WQ_UNBOUND)) { wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); if (!wq->cpu_pwqs) return -ENOMEM; for_each_possible_cpu(cpu) { struct pool_workqueue *pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); struct worker_pool *cpu_pools = per_cpu(cpu_worker_pools, cpu); init_pwq(pwq, wq, &cpu_pools[highpri]); mutex_lock(&wq->mutex); link_pwq(pwq); mutex_unlock(&wq->mutex); } return 0; } else if (wq->flags & __WQ_ORDERED) { ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); /* there should only be single pwq for ordering guarantee */ WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), "ordering guarantee broken for workqueue %s\n", wq->name); return ret; } else { return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); } } static int wq_clamp_max_active(int max_active, unsigned int flags, const char 
*name) { int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; if (max_active < 1 || max_active > lim) pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", max_active, name, 1, lim); return clamp_val(max_active, 1, lim); } struct workqueue_struct *__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, struct lock_class_key *key, const char *lock_name, ...) { size_t tbl_size = 0; va_list args; struct workqueue_struct *wq; struct pool_workqueue *pwq; /* allocate wq and format name */ if (flags & WQ_UNBOUND) tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]); wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); if (!wq) return NULL; if (flags & WQ_UNBOUND) { wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); if (!wq->unbound_attrs) goto err_free_wq; } va_start(args, lock_name); vsnprintf(wq->name, sizeof(wq->name), fmt, args); va_end(args); max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, flags, wq->name); /* init wq */ wq->flags = flags; wq->saved_max_active = max_active; mutex_init(&wq->mutex); atomic_set(&wq->nr_pwqs_to_flush, 0); INIT_LIST_HEAD(&wq->pwqs); INIT_LIST_HEAD(&wq->flusher_queue); INIT_LIST_HEAD(&wq->flusher_overflow); INIT_LIST_HEAD(&wq->maydays); lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); INIT_LIST_HEAD(&wq->list); if (alloc_and_link_pwqs(wq) < 0) goto err_free_wq; /* * Workqueues which may be used during memory reclaim should * have a rescuer to guarantee forward progress. 
*/ if (flags & WQ_MEM_RECLAIM) { struct worker *rescuer; rescuer = alloc_worker(); if (!rescuer) goto err_destroy; rescuer->rescue_wq = wq; rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name); if (IS_ERR(rescuer->task)) { kfree(rescuer); goto err_destroy; } wq->rescuer = rescuer; rescuer->task->flags |= PF_NO_SETAFFINITY; wake_up_process(rescuer->task); } if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) goto err_destroy; /* * wq_pool_mutex protects global freeze state and workqueues list. * Grab it, adjust max_active and add the new @wq to workqueues * list. */ mutex_lock(&wq_pool_mutex); mutex_lock(&wq->mutex); for_each_pwq(pwq, wq) pwq_adjust_max_active(pwq); mutex_unlock(&wq->mutex); list_add(&wq->list, &workqueues); mutex_unlock(&wq_pool_mutex); return wq; err_free_wq: free_workqueue_attrs(wq->unbound_attrs); kfree(wq); return NULL; err_destroy: destroy_workqueue(wq); return NULL; } EXPORT_SYMBOL_GPL(__alloc_workqueue_key); /** * destroy_workqueue - safely terminate a workqueue * @wq: target workqueue * * Safely destroy a workqueue. All work currently pending will be done first. */ void destroy_workqueue(struct workqueue_struct *wq) { struct pool_workqueue *pwq; int node; /* drain it before proceeding with destruction */ drain_workqueue(wq); /* sanity checks */ mutex_lock(&wq->mutex); for_each_pwq(pwq, wq) { int i; for (i = 0; i < WORK_NR_COLORS; i++) { if (WARN_ON(pwq->nr_in_flight[i])) { mutex_unlock(&wq->mutex); return; } } if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || WARN_ON(pwq->nr_active) || WARN_ON(!list_empty(&pwq->delayed_works))) { mutex_unlock(&wq->mutex); return; } } mutex_unlock(&wq->mutex); /* * wq list is used to freeze wq, remove from list after * flushing is complete in case freeze races us. 
*/ mutex_lock(&wq_pool_mutex); list_del_init(&wq->list); mutex_unlock(&wq_pool_mutex); workqueue_sysfs_unregister(wq); if (wq->rescuer) { kthread_stop(wq->rescuer->task); kfree(wq->rescuer); wq->rescuer = NULL; } if (!(wq->flags & WQ_UNBOUND)) { /* * The base ref is never dropped on per-cpu pwqs. Directly * free the pwqs and wq. */ free_percpu(wq->cpu_pwqs); kfree(wq); } else { /* * We're the sole accessor of @wq at this point. Directly * access numa_pwq_tbl[] and dfl_pwq to put the base refs. * @wq will be freed when the last pwq is released. */ for_each_node(node) { pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); put_pwq_unlocked(pwq); } /* * Put dfl_pwq. @wq may be freed any time after dfl_pwq is * put. Don't access it afterwards. */ pwq = wq->dfl_pwq; wq->dfl_pwq = NULL; put_pwq_unlocked(pwq); } } EXPORT_SYMBOL_GPL(destroy_workqueue); /** * workqueue_set_max_active - adjust max_active of a workqueue * @wq: target workqueue * @max_active: new max_active value. * * Set max_active of @wq to @max_active. * * CONTEXT: * Don't call from IRQ context. */ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) { struct pool_workqueue *pwq; /* disallow meddling with max_active for ordered workqueues */ if (WARN_ON(wq->flags & __WQ_ORDERED)) return; max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); mutex_lock(&wq->mutex); wq->saved_max_active = max_active; for_each_pwq(pwq, wq) pwq_adjust_max_active(pwq); mutex_unlock(&wq->mutex); } EXPORT_SYMBOL_GPL(workqueue_set_max_active); /** * current_is_workqueue_rescuer - is %current workqueue rescuer? * * Determine whether %current is a workqueue rescuer. Can be used from * work functions to determine whether it's being run off the rescuer task. 
*/ bool current_is_workqueue_rescuer(void) { struct worker *worker = current_wq_worker(); return worker && worker->rescue_wq; } /** * workqueue_congested - test whether a workqueue is congested * @cpu: CPU in question * @wq: target workqueue * * Test whether @wq's cpu workqueue for @cpu is congested. There is * no synchronization around this function and the test result is * unreliable and only useful as advisory hints or for debugging. * * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. * Note that both per-cpu and unbound workqueues may be associated with * multiple pool_workqueues which have separate congested states. A * workqueue being congested on one CPU doesn't mean the workqueue is also * contested on other CPUs / NUMA nodes. * * RETURNS: * %true if congested, %false otherwise. */ bool workqueue_congested(int cpu, struct workqueue_struct *wq) { struct pool_workqueue *pwq; bool ret; rcu_read_lock_sched(); if (cpu == WORK_CPU_UNBOUND) cpu = smp_processor_id(); if (!(wq->flags & WQ_UNBOUND)) pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); else pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); ret = !list_empty(&pwq->delayed_works); rcu_read_unlock_sched(); return ret; } EXPORT_SYMBOL_GPL(workqueue_congested); /** * work_busy - test whether a work is currently pending or running * @work: the work to be tested * * Test whether @work is currently pending or running. There is no * synchronization around this function and the test result is * unreliable and only useful as advisory hints or for debugging. * * RETURNS: * OR'd bitmask of WORK_BUSY_* bits. 
*/ unsigned int work_busy(struct work_struct *work) { struct worker_pool *pool; unsigned long flags; unsigned int ret = 0; if (work_pending(work)) ret |= WORK_BUSY_PENDING; local_irq_save(flags); pool = get_work_pool(work); if (pool) { spin_lock(&pool->lock); if (find_worker_executing_work(pool, work)) ret |= WORK_BUSY_RUNNING; spin_unlock(&pool->lock); } local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(work_busy); /** * set_worker_desc - set description for the current work item * @fmt: printf-style format string * @...: arguments for the format string * * This function can be called by a running work function to describe what * the work item is about. If the worker task gets dumped, this * information will be printed out together to help debugging. The * description can be at most WORKER_DESC_LEN including the trailing '\0'. */ void set_worker_desc(const char *fmt, ...) { struct worker *worker = current_wq_worker(); va_list args; if (worker) { va_start(args, fmt); vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); va_end(args); worker->desc_valid = true; } } /** * print_worker_info - print out worker information and description * @log_lvl: the log level to use when printing * @task: target task * * If @task is a worker and currently executing a work item, print out the * name of the workqueue being serviced and worker description set with * set_worker_desc() by the currently executing work item. * * This function can be safely called on any task as long as the * task_struct itself is accessible. While safe, this function isn't * synchronized and may print out mixups or garbages of limited length. 
*/ void print_worker_info(const char *log_lvl, struct task_struct *task) { work_func_t *fn = NULL; char name[WQ_NAME_LEN] = { }; char desc[WORKER_DESC_LEN] = { }; struct pool_workqueue *pwq = NULL; struct workqueue_struct *wq = NULL; bool desc_valid = false; struct worker *worker; if (!(task->flags & PF_WQ_WORKER)) return; /* * This function is called without any synchronization and @task * could be in any state. Be careful with dereferences. */ worker = probe_kthread_data(task); /* * Carefully copy the associated workqueue's workfn and name. Keep * the original last '\0' in case the original contains garbage. */ probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); probe_kernel_read(name, wq->name, sizeof(name) - 1); /* copy worker description */ probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid)); if (desc_valid) probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); if (fn || name[0] || desc[0]) { printk("%sWorkqueue: %s %pf", log_lvl, name, fn); if (desc[0]) pr_cont(" (%s)", desc); pr_cont("\n"); } } /* * CPU hotplug. * * There are two challenges in supporting CPU hotplug. Firstly, there * are a lot of assumptions on strong associations among work, pwq and * pool which make migrating pending and scheduled works very * difficult to implement without impacting hot paths. Secondly, * worker pools serve mix of short, long and very long running works making * blocked draining impractical. * * This is solved by allowing the pools to be disassociated from the CPU * running as an unbound one and allowing it to be reattached later if the * cpu comes back online. 
*/ static void wq_unbind_fn(struct work_struct *work) { int cpu = smp_processor_id(); struct worker_pool *pool; struct worker *worker; int wi; for_each_cpu_worker_pool(pool, cpu) { WARN_ON_ONCE(cpu != smp_processor_id()); mutex_lock(&pool->manager_mutex); spin_lock_irq(&pool->lock); /* * We've blocked all manager operations. Make all workers * unbound and set DISASSOCIATED. Before this, all workers * except for the ones which are still executing works from * before the last CPU down must be on the cpu. After * this, they may become diasporas. */ for_each_pool_worker(worker, wi, pool) worker->flags |= WORKER_UNBOUND; pool->flags |= POOL_DISASSOCIATED; spin_unlock_irq(&pool->lock); mutex_unlock(&pool->manager_mutex); /* * Call schedule() so that we cross rq->lock and thus can * guarantee sched callbacks see the %WORKER_UNBOUND flag. * This is necessary as scheduler callbacks may be invoked * from other cpus. */ schedule(); /* * Sched callbacks are disabled now. Zap nr_running. * After this, nr_running stays zero and need_more_worker() * and keep_working() are always true as long as the * worklist is not empty. This pool now behaves as an * unbound (in terms of concurrency management) pool which * are served by workers tied to the pool. */ atomic_set(&pool->nr_running, 0); /* * With concurrency management just turned off, a busy * worker blocking could lead to lengthy stalls. Kick off * unbound chain execution of currently pending work items. */ spin_lock_irq(&pool->lock); wake_up_worker(pool); spin_unlock_irq(&pool->lock); } } /** * rebind_workers - rebind all workers of a pool to the associated CPU * @pool: pool of interest * * @pool->cpu is coming online. Rebind all workers to the CPU. */ static void rebind_workers(struct worker_pool *pool) { struct worker *worker; int wi; lockdep_assert_held(&pool->manager_mutex); /* * Restore CPU affinity of all workers. 
As all idle workers should * be on the run-queue of the associated CPU before any local * wake-ups for concurrency management happen, restore CPU affinty * of all workers first and then clear UNBOUND. As we're called * from CPU_ONLINE, the following shouldn't fail. */ for_each_pool_worker(worker, wi, pool) WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0); spin_lock_irq(&pool->lock); for_each_pool_worker(worker, wi, pool) { unsigned int worker_flags = worker->flags; /* * A bound idle worker should actually be on the runqueue * of the associated CPU for local wake-ups targeting it to * work. Kick all idle workers so that they migrate to the * associated CPU. Doing this in the same loop as * replacing UNBOUND with REBOUND is safe as no worker will * be bound before @pool->lock is released. */ if (worker_flags & WORKER_IDLE) wake_up_process(worker->task); /* * We want to clear UNBOUND but can't directly call * worker_clr_flags() or adjust nr_running. Atomically * replace UNBOUND with another NOT_RUNNING flag REBOUND. * @worker will clear REBOUND using worker_clr_flags() when * it initiates the next execution cycle thus restoring * concurrency management. Note that when or whether * @worker clears REBOUND doesn't affect correctness. * * ACCESS_ONCE() is necessary because @worker->flags may be * tested without holding any lock in * wq_worker_waking_up(). Without it, NOT_RUNNING test may * fail incorrectly leading to premature concurrency * management operations. */ WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); worker_flags |= WORKER_REBOUND; worker_flags &= ~WORKER_UNBOUND; ACCESS_ONCE(worker->flags) = worker_flags; } spin_unlock_irq(&pool->lock); } /** * restore_unbound_workers_cpumask - restore cpumask of unbound workers * @pool: unbound pool of interest * @cpu: the CPU which is coming up * * An unbound pool may end up with a cpumask which doesn't have any online * CPUs. 
When a worker of such pool get scheduled, the scheduler resets * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any * online CPU before, cpus_allowed of all its workers should be restored. */ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) { static cpumask_t cpumask; struct worker *worker; int wi; lockdep_assert_held(&pool->manager_mutex); /* is @cpu allowed for @pool? */ if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) return; /* is @cpu the only online CPU? */ cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); if (cpumask_weight(&cpumask) != 1) return; /* as we're called from CPU_ONLINE, the following shouldn't fail */ for_each_pool_worker(worker, wi, pool) WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0); } /* * Workqueues should be brought up before normal priority CPU notifiers. * This will be registered high priority CPU notifier. */ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int cpu = (unsigned long)hcpu; struct worker_pool *pool; struct workqueue_struct *wq; int pi; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: for_each_cpu_worker_pool(pool, cpu) { if (pool->nr_workers) continue; if (create_and_start_worker(pool) < 0) return NOTIFY_BAD; } break; case CPU_DOWN_FAILED: case CPU_ONLINE: mutex_lock(&wq_pool_mutex); for_each_pool(pool, pi) { mutex_lock(&pool->manager_mutex); if (pool->cpu == cpu) { spin_lock_irq(&pool->lock); pool->flags &= ~POOL_DISASSOCIATED; spin_unlock_irq(&pool->lock); rebind_workers(pool); } else if (pool->cpu < 0) { restore_unbound_workers_cpumask(pool, cpu); } mutex_unlock(&pool->manager_mutex); } /* update NUMA affinity of unbound workqueues */ list_for_each_entry(wq, &workqueues, list) wq_update_unbound_numa(wq, cpu, true); mutex_unlock(&wq_pool_mutex); break; } return NOTIFY_OK; } /* * Workqueues should be brought down after normal priority CPU notifiers. 
* This will be registered as low priority CPU notifier. */ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int cpu = (unsigned long)hcpu; struct work_struct unbind_work; struct workqueue_struct *wq; switch (action & ~CPU_TASKS_FROZEN) { case CPU_DOWN_PREPARE: /* unbinding per-cpu workers should happen on the local CPU */ INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); queue_work_on(cpu, system_highpri_wq, &unbind_work); /* update NUMA affinity of unbound workqueues */ mutex_lock(&wq_pool_mutex); list_for_each_entry(wq, &workqueues, list) wq_update_unbound_numa(wq, cpu, false); mutex_unlock(&wq_pool_mutex); /* wait for per-cpu unbinding to finish */ flush_work(&unbind_work); break; } return NOTIFY_OK; } #ifdef CONFIG_SMP struct work_for_cpu { struct work_struct work; long (*fn)(void *); void *arg; long ret; }; static void work_for_cpu_fn(struct work_struct *work) { struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); wfc->ret = wfc->fn(wfc->arg); } /** * work_on_cpu - run a function in user context on a particular cpu * @cpu: the cpu to run on * @fn: the function to run * @arg: the function arg * * This will return the value @fn returns. * It is up to the caller to ensure that the cpu doesn't go offline. * The caller must not hold any locks which would prevent @fn from completing. */ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) { struct work_for_cpu wfc = { .fn = fn, .arg = arg }; INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); schedule_work_on(cpu, &wfc.work); flush_work(&wfc.work); return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); #endif /* CONFIG_SMP */ #ifdef CONFIG_FREEZER /** * freeze_workqueues_begin - begin freezing workqueues * * Start freezing workqueues. After this function returns, all freezable * workqueues will queue new works to their delayed_works list instead of * pool->worklist. * * CONTEXT: * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
*/ void freeze_workqueues_begin(void) { struct worker_pool *pool; struct workqueue_struct *wq; struct pool_workqueue *pwq; int pi; mutex_lock(&wq_pool_mutex); WARN_ON_ONCE(workqueue_freezing); workqueue_freezing = true; /* set FREEZING */ for_each_pool(pool, pi) { spin_lock_irq(&pool->lock); WARN_ON_ONCE(pool->flags & POOL_FREEZING); pool->flags |= POOL_FREEZING; spin_unlock_irq(&pool->lock); } list_for_each_entry(wq, &workqueues, list) { mutex_lock(&wq->mutex); for_each_pwq(pwq, wq) pwq_adjust_max_active(pwq); mutex_unlock(&wq->mutex); } mutex_unlock(&wq_pool_mutex); } /** * freeze_workqueues_busy - are freezable workqueues still busy? * * Check whether freezing is complete. This function must be called * between freeze_workqueues_begin() and thaw_workqueues(). * * CONTEXT: * Grabs and releases wq_pool_mutex. * * RETURNS: * %true if some freezable workqueues are still busy. %false if freezing * is complete. */ bool freeze_workqueues_busy(void) { bool busy = false; struct workqueue_struct *wq; struct pool_workqueue *pwq; mutex_lock(&wq_pool_mutex); WARN_ON_ONCE(!workqueue_freezing); list_for_each_entry(wq, &workqueues, list) { if (!(wq->flags & WQ_FREEZABLE)) continue; /* * nr_active is monotonically decreasing. It's safe * to peek without lock. */ rcu_read_lock_sched(); for_each_pwq(pwq, wq) { WARN_ON_ONCE(pwq->nr_active < 0); if (pwq->nr_active) { busy = true; rcu_read_unlock_sched(); goto out_unlock; } } rcu_read_unlock_sched(); } out_unlock: mutex_unlock(&wq_pool_mutex); return busy; } /** * thaw_workqueues - thaw workqueues * * Thaw workqueues. Normal queueing is restored and all collected * frozen works are transferred to their respective pool worklists. * * CONTEXT: * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
*/ void thaw_workqueues(void) { struct workqueue_struct *wq; struct pool_workqueue *pwq; struct worker_pool *pool; int pi; mutex_lock(&wq_pool_mutex); if (!workqueue_freezing) goto out_unlock; /* clear FREEZING */ for_each_pool(pool, pi) { spin_lock_irq(&pool->lock); WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); pool->flags &= ~POOL_FREEZING; spin_unlock_irq(&pool->lock); } /* restore max_active and repopulate worklist */ list_for_each_entry(wq, &workqueues, list) { mutex_lock(&wq->mutex); for_each_pwq(pwq, wq) pwq_adjust_max_active(pwq); mutex_unlock(&wq->mutex); } workqueue_freezing = false; out_unlock: mutex_unlock(&wq_pool_mutex); } #endif /* CONFIG_FREEZER */ static void __init wq_numa_init(void) { cpumask_var_t *tbl; int node, cpu; /* determine NUMA pwq table len - highest node id + 1 */ for_each_node(node) wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1); if (num_possible_nodes() <= 1) return; if (wq_disable_numa) { pr_info("workqueue: NUMA affinity support disabled\n"); return; } wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); BUG_ON(!wq_update_unbound_numa_attrs_buf); /* * We want masks of possible CPUs of each node which isn't readily * available. Build one from cpu_to_node() which should have been * fully initialized by now. */ tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL); BUG_ON(!tbl); for_each_node(node) BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node_online(node) ? 
node : NUMA_NO_NODE)); for_each_possible_cpu(cpu) { node = cpu_to_node(cpu); if (WARN_ON(node == NUMA_NO_NODE)) { pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu); /* happens iff arch is bonkers, let's just proceed */ return; } cpumask_set_cpu(cpu, tbl[node]); } wq_numa_possible_cpumask = tbl; wq_numa_enabled = true; } static int __init init_workqueues(void) { int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; int i, cpu; /* make sure we have enough bits for OFFQ pool ID */ BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) < WORK_CPU_END * NR_STD_WORKER_POOLS); WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); wq_numa_init(); /* initialize CPU pools */ for_each_possible_cpu(cpu) { struct worker_pool *pool; i = 0; for_each_cpu_worker_pool(pool, cpu) { BUG_ON(init_worker_pool(pool)); pool->cpu = cpu; cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); pool->attrs->nice = std_nice[i++]; pool->node = cpu_to_node(cpu); /* alloc pool ID */ mutex_lock(&wq_pool_mutex); BUG_ON(worker_pool_assign_id(pool)); mutex_unlock(&wq_pool_mutex); } } /* create the initial worker */ for_each_online_cpu(cpu) { struct worker_pool *pool; for_each_cpu_worker_pool(pool, cpu) { pool->flags &= ~POOL_DISASSOCIATED; BUG_ON(create_and_start_worker(pool) < 0); } } /* create default unbound and ordered wq attrs */ for (i = 0; i < NR_STD_WORKER_POOLS; i++) { struct workqueue_attrs *attrs; BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); attrs->nice = std_nice[i]; unbound_std_wq_attrs[i] = attrs; /* * An ordered wq should have only one pwq as ordering is * guaranteed by max_active which is enforced by pwqs. * Turn off NUMA so that dfl_pwq is used for all nodes. 
*/ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); attrs->nice = std_nice[i]; attrs->no_numa = true; ordered_wq_attrs[i] = attrs; } system_wq = alloc_workqueue("events", 0, 0); system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); system_long_wq = alloc_workqueue("events_long", 0, 0); system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); system_freezable_wq = alloc_workqueue("events_freezable", WQ_FREEZABLE, 0); BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || !system_unbound_wq || !system_freezable_wq); return 0; } early_initcall(init_workqueues);
gpl-2.0
xmyth/linux-mips-osolution
arch/ia64/ia32/ia32_signal.c
60
29847
/* * IA32 Architecture-specific signal handling support. * * Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> * Copyright (C) 2000 VA Linux Co * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> * * Derived from i386 and Alpha versions. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/personality.h> #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/wait.h> #include <linux/compat.h> #include <asm/intrinsics.h> #include <asm/uaccess.h> #include <asm/rse.h> #include <asm/sigcontext.h> #include "ia32priv.h" #include "../kernel/sigframe.h" #define A(__x) ((unsigned long)(__x)) #define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define __IA32_NR_sigreturn 119 #define __IA32_NR_rt_sigreturn 173 struct sigframe_ia32 { int pretcode; int sig; struct sigcontext_ia32 sc; struct _fpstate_ia32 fpstate; unsigned int extramask[_COMPAT_NSIG_WORDS-1]; char retcode[8]; }; struct rt_sigframe_ia32 { int pretcode; int sig; int pinfo; int puc; compat_siginfo_t info; struct ucontext_ia32 uc; struct _fpstate_ia32 fpstate; char retcode[8]; }; int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from) { unsigned long tmp; int err; if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) return -EFAULT; err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); if (to->si_code < 0) err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { switch (to->si_code >> 16) { case __SI_CHLD >> 16: err |= __get_user(to->si_utime, &from->si_utime); err |= __get_user(to->si_stime, &from->si_stime); err |= __get_user(to->si_status, &from->si_status); 
default: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); break; case __SI_FAULT >> 16: err |= __get_user(tmp, &from->si_addr); to->si_addr = (void __user *) tmp; break; case __SI_POLL >> 16: err |= __get_user(to->si_band, &from->si_band); err |= __get_user(to->si_fd, &from->si_fd); break; case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); err |= __get_user(to->si_int, &from->si_int); break; } } return err; } int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from) { unsigned int addr; int err; if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) return -EFAULT; /* If you change siginfo_t structure, please be sure this code is fixed accordingly. It should never copy any pad contained in the structure to avoid security leaks, but must copy the generic 3 ints plus the relevant union member. This routine must convert siginfo from 64bit to 32bit as well at the same time. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); if (from->si_code < 0) err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { switch (from->si_code >> 16) { case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); default: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; case __SI_FAULT >> 16: /* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... 
*/ err |= __put_user(from->_sifields._pad[0], &to->si_addr); break; case __SI_POLL >> 16: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; case __SI_TIMER >> 16: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); addr = (unsigned long) from->si_ptr; err |= __put_user(addr, &to->si_ptr); break; case __SI_RT >> 16: /* Not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); addr = (unsigned long) from->si_ptr; err |= __put_user(addr, &to->si_ptr); break; } } return err; } /* * SAVE and RESTORE of ia32 fpstate info, from ia64 current state * Used in exception handler to pass the fpstate to the user, and restore * the fpstate while returning from the exception handler. * * fpstate info and their mapping to IA64 regs: * fpstate REG(BITS) Attribute Comments * cw ar.fcr(0:12) with bits 7 and 6 not used * sw ar.fsr(0:15) * tag ar.fsr(16:31) with odd numbered bits not used * (read returns 0, writes ignored) * ipoff ar.fir(0:31) * cssel ar.fir(32:47) * dataoff ar.fdr(0:31) * datasel ar.fdr(32:47) * * _st[(0+TOS)%8] f8 * _st[(1+TOS)%8] f9 * _st[(2+TOS)%8] f10 * _st[(3+TOS)%8] f11 (f8..f11 from ptregs) * : : : (f12..f15 from live reg) * : : : * _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13) * * status Same as sw RO * magic 0 as X86_FXSR_MAGIC in ia32 * mxcsr Bits(7:15)=ar.fcr(39:47) * Bits(0:5) =ar.fsr(32:37) with bit 6 reserved * _xmm[0..7] f16..f31 (live registers) * with _xmm[0] * Bit(64:127)=f17(0:63) * Bit(0:63)=f16(0:63) * All other fields unused... 
*/ static int save_ia32_fpstate_live (struct _fpstate_ia32 __user *save) { struct task_struct *tsk = current; struct pt_regs *ptp; struct _fpreg_ia32 *fpregp; char buf[32]; unsigned long fsr, fcr, fir, fdr; unsigned long new_fsr; unsigned long num128[2]; unsigned long mxcsr=0; int fp_tos, fr8_st_map; if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) return -EFAULT; /* Read in fsr, fcr, fir, fdr and copy onto fpstate */ fsr = ia64_getreg(_IA64_REG_AR_FSR); fcr = ia64_getreg(_IA64_REG_AR_FCR); fir = ia64_getreg(_IA64_REG_AR_FIR); fdr = ia64_getreg(_IA64_REG_AR_FDR); /* * We need to clear the exception state before calling the signal handler. Clear * the bits 15, bits 0-7 in fp status word. Similar to the functionality of fnclex * instruction. */ new_fsr = fsr & ~0x80ff; ia64_setreg(_IA64_REG_AR_FSR, new_fsr); __put_user(fcr & 0xffff, &save->cw); __put_user(fsr & 0xffff, &save->sw); __put_user((fsr>>16) & 0xffff, &save->tag); __put_user(fir, &save->ipoff); __put_user((fir>>32) & 0xffff, &save->cssel); __put_user(fdr, &save->dataoff); __put_user((fdr>>32) & 0xffff, &save->datasel); __put_user(fsr & 0xffff, &save->status); mxcsr = ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f); __put_user(mxcsr & 0xffff, &save->mxcsr); __put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000 /* * save f8..f11 from pt_regs * save f12..f15 from live register set */ /* * Find the location where f8 has to go in fp reg stack. This depends on * TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps * to. 
*/ fp_tos = (fsr>>11)&0x7; fr8_st_map = (8-fp_tos)&0x7; ptp = task_pt_regs(tsk); fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); ia64f2ia32f(fpregp, &ptp->f8); copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64f2ia32f(fpregp, &ptp->f9); copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64f2ia32f(fpregp, &ptp->f10); copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64f2ia32f(fpregp, &ptp->f11); copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64_stfe(fpregp, 12); copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64_stfe(fpregp, 13); copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64_stfe(fpregp, 14); copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64_stfe(fpregp, 15); copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32)); ia64_stf8(&num128[0], 16); ia64_stf8(&num128[1], 17); copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32)); ia64_stf8(&num128[0], 18); ia64_stf8(&num128[1], 19); copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32)); ia64_stf8(&num128[0], 20); ia64_stf8(&num128[1], 21); copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32)); ia64_stf8(&num128[0], 22); ia64_stf8(&num128[1], 23); copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32)); ia64_stf8(&num128[0], 24); ia64_stf8(&num128[1], 25); copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32)); ia64_stf8(&num128[0], 26); ia64_stf8(&num128[1], 27); copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32)); ia64_stf8(&num128[0], 28); ia64_stf8(&num128[1], 29); copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32)); ia64_stf8(&num128[0], 30); ia64_stf8(&num128[1], 31); copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32)); return 0; 
} static int restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save) { struct task_struct *tsk = current; struct pt_regs *ptp; unsigned int lo, hi; unsigned long num128[2]; unsigned long num64, mxcsr; struct _fpreg_ia32 *fpregp; char buf[32]; unsigned long fsr, fcr, fir, fdr; int fp_tos, fr8_st_map; if (!access_ok(VERIFY_READ, save, sizeof(*save))) return(-EFAULT); /* * Updating fsr, fcr, fir, fdr. * Just a bit more complicated than save. * - Need to make sure that we don't write any value other than the * specific fpstate info * - Need to make sure that the untouched part of frs, fdr, fir, fcr * should remain same while writing. * So, we do a read, change specific fields and write. */ fsr = ia64_getreg(_IA64_REG_AR_FSR); fcr = ia64_getreg(_IA64_REG_AR_FCR); fir = ia64_getreg(_IA64_REG_AR_FIR); fdr = ia64_getreg(_IA64_REG_AR_FDR); __get_user(mxcsr, (unsigned int __user *)&save->mxcsr); /* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */ __get_user(lo, (unsigned int __user *)&save->cw); num64 = mxcsr & 0xff10; num64 = (num64 << 32) | (lo & 0x1f3f); fcr = (fcr & (~0xff1000001f3fUL)) | num64; /* setting bits 0..31 with sw and tag and 32..37 from mxcsr */ __get_user(lo, (unsigned int __user *)&save->sw); /* set bits 15,7 (fsw.b, fsw.es) to reflect the current error status */ if ( !(lo & 0x7f) ) lo &= (~0x8080); __get_user(hi, (unsigned int __user *)&save->tag); num64 = mxcsr & 0x3f; num64 = (num64 << 16) | (hi & 0xffff); num64 = (num64 << 16) | (lo & 0xffff); fsr = (fsr & (~0x3fffffffffUL)) | num64; /* setting bits 0..47 with cssel and ipoff */ __get_user(lo, (unsigned int __user *)&save->ipoff); __get_user(hi, (unsigned int __user *)&save->cssel); num64 = hi & 0xffff; num64 = (num64 << 32) | lo; fir = (fir & (~0xffffffffffffUL)) | num64; /* setting bits 0..47 with datasel and dataoff */ __get_user(lo, (unsigned int __user *)&save->dataoff); __get_user(hi, (unsigned int __user *)&save->datasel); num64 = hi & 0xffff; num64 = (num64 << 32) | lo; fdr = (fdr 
& (~0xffffffffffffUL)) | num64; ia64_setreg(_IA64_REG_AR_FSR, fsr); ia64_setreg(_IA64_REG_AR_FCR, fcr); ia64_setreg(_IA64_REG_AR_FIR, fir); ia64_setreg(_IA64_REG_AR_FDR, fdr); /* * restore f8..f11 onto pt_regs * restore f12..f15 onto live registers */ /* * Find the location where f8 has to go in fp reg stack. This depends on * TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps * to. */ fp_tos = (fsr>>11)&0x7; fr8_st_map = (8-fp_tos)&0x7; fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); ptp = task_pt_regs(tsk); copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia32f2ia64f(&ptp->f8, fpregp); copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia32f2ia64f(&ptp->f9, fpregp); copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia32f2ia64f(&ptp->f10, fpregp); copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia32f2ia64f(&ptp->f11, fpregp); copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia64_ldfe(12, fpregp); copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia64_ldfe(13, fpregp); copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia64_ldfe(14, fpregp); copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32)); ia64_ldfe(15, fpregp); copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32)); ia64_ldf8(16, &num128[0]); ia64_ldf8(17, &num128[1]); copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32)); ia64_ldf8(18, &num128[0]); ia64_ldf8(19, &num128[1]); copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32)); ia64_ldf8(20, &num128[0]); ia64_ldf8(21, &num128[1]); copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32)); ia64_ldf8(22, &num128[0]); ia64_ldf8(23, &num128[1]); copy_from_user(num128, &save->_xmm[4], sizeof(struct 
_xmmreg_ia32)); ia64_ldf8(24, &num128[0]); ia64_ldf8(25, &num128[1]); copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32)); ia64_ldf8(26, &num128[0]); ia64_ldf8(27, &num128[1]); copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32)); ia64_ldf8(28, &num128[0]); ia64_ldf8(29, &num128[1]); copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32)); ia64_ldf8(30, &num128[0]); ia64_ldf8(31, &num128[1]); return 0; } static inline void sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int restorer) { if (handler + 1 <= 2) /* SIG_DFL, SIG_IGN, or SIG_ERR: must sign-extend to 64-bits */ sa->sa.sa_handler = (__sighandler_t) A((int) handler); else sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler); } asmlinkage long sys32_sigsuspend (int history0, int history1, old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_thread_flag(TIF_RESTORE_SIGMASK); return -ERESTARTNOHAND; } asmlinkage long sys32_signal (int sig, unsigned int handler) { struct k_sigaction new_sa, old_sa; int ret; sigact_set_handler(&new_sa, handler, 0); new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; sigemptyset(&new_sa.sa.sa_mask); ret = do_sigaction(sig, &new_sa, &old_sa); return ret ? ret : IA32_SA_HANDLER(&old_sa); } asmlinkage long sys32_rt_sigaction (int sig, struct sigaction32 __user *act, struct sigaction32 __user *oact, unsigned int sigsetsize) { struct k_sigaction new_ka, old_ka; unsigned int handler, restorer; int ret; /* XXX: Don't preclude handling different sized sigset_t's. 
*/ if (sigsetsize != sizeof(compat_sigset_t)) return -EINVAL; if (act) { ret = get_user(handler, &act->sa_handler); ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); ret |= get_user(restorer, &act->sa_restorer); ret |= copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, sizeof(compat_sigset_t)); if (ret) return -EFAULT; sigact_set_handler(&new_ka, handler, restorer); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler); ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer); ret |= copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, sizeof(compat_sigset_t)); } return ret; } asmlinkage long sys32_rt_sigprocmask (int how, compat_sigset_t __user *set, compat_sigset_t __user *oset, unsigned int sigsetsize) { mm_segment_t old_fs = get_fs(); sigset_t s; long ret; if (sigsetsize > sizeof(s)) return -EINVAL; if (set) { memset(&s, 0, sizeof(s)); if (copy_from_user(&s.sig, set, sigsetsize)) return -EFAULT; } set_fs(KERNEL_DS); ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *) &s : NULL, oset ? 
(sigset_t __user *) &s : NULL, sizeof(s)); set_fs(old_fs); if (ret) return ret; if (oset) { if (copy_to_user(oset, &s.sig, sigsetsize)) return -EFAULT; } return 0; } asmlinkage long sys32_rt_sigqueueinfo (int pid, int sig, compat_siginfo_t __user *uinfo) { mm_segment_t old_fs = get_fs(); siginfo_t info; int ret; if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; set_fs(KERNEL_DS); ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); set_fs(old_fs); return ret; } asmlinkage long sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact) { struct k_sigaction new_ka, old_ka; unsigned int handler, restorer; int ret; if (act) { compat_old_sigset_t mask; ret = get_user(handler, &act->sa_handler); ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); ret |= get_user(restorer, &act->sa_restorer); ret |= get_user(mask, &act->sa_mask); if (ret) return ret; sigact_set_handler(&new_ka, handler, restorer); siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler); ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer); ret |= put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } return ret; } static int setup_sigcontext_ia32 (struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate, struct pt_regs *regs, unsigned long mask) { int err = 0; unsigned long flag; if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc))) return -EFAULT; err |= __put_user((regs->r16 >> 32) & 0xffff, (unsigned int __user *)&sc->fs); err |= __put_user((regs->r16 >> 48) & 0xffff, (unsigned int __user *)&sc->gs); err |= __put_user((regs->r16 >> 16) & 0xffff, (unsigned int __user *)&sc->es); err |= __put_user(regs->r16 & 0xffff, (unsigned int __user *)&sc->ds); err |= __put_user(regs->r15, &sc->edi); err |= __put_user(regs->r14, &sc->esi); err |= __put_user(regs->r13, &sc->ebp); err |= __put_user(regs->r12, &sc->esp); err |= __put_user(regs->r11, &sc->ebx); err |= __put_user(regs->r10, &sc->edx); err |= __put_user(regs->r9, &sc->ecx); err |= __put_user(regs->r8, &sc->eax); #if 0 err |= __put_user(current->tss.trap_no, &sc->trapno); err |= __put_user(current->tss.error_code, &sc->err); #endif err |= __put_user(regs->cr_iip, &sc->eip); err |= __put_user(regs->r17 & 0xffff, (unsigned int __user *)&sc->cs); /* * `eflags' is in an ar register for this context */ flag = ia64_getreg(_IA64_REG_AR_EFLAG); err |= __put_user((unsigned int)flag, &sc->eflags); err |= __put_user(regs->r12, &sc->esp_at_signal); err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int __user *)&sc->ss); if ( save_ia32_fpstate_live(fpstate) < 0 ) err = -EFAULT; else err |= __put_user((u32)(u64)fpstate, &sc->fpstate); #if 0 tmp = save_i387(fpstate); if (tmp < 0) err = 1; else err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); /* non-iBCS2 extensions.. 
*/ #endif err |= __put_user(mask, &sc->oldmask); #if 0 err |= __put_user(current->tss.cr2, &sc->cr2); #endif return err; } static int restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 __user *sc, int *peax) { unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) return(-EFAULT); #define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x) #define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48) #define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32) #define copyseg_cs(tmp) (regs->r17 |= tmp) #define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16) #define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16) #define copyseg_ds(tmp) (regs->r16 |= tmp) #define COPY_SEG(seg) \ { \ unsigned short tmp; \ err |= __get_user(tmp, &sc->seg); \ copyseg_##seg(tmp); \ } #define COPY_SEG_STRICT(seg) \ { \ unsigned short tmp; \ err |= __get_user(tmp, &sc->seg); \ copyseg_##seg(tmp|3); \ } /* To make COPY_SEGs easier, we zero r16, r17 */ regs->r16 = 0; regs->r17 = 0; COPY_SEG(gs); COPY_SEG(fs); COPY_SEG(es); COPY_SEG(ds); COPY(r15, edi); COPY(r14, esi); COPY(r13, ebp); COPY(r12, esp); COPY(r11, ebx); COPY(r10, edx); COPY(r9, ecx); COPY(cr_iip, eip); COPY_SEG_STRICT(cs); COPY_SEG_STRICT(ss); ia32_load_segment_descriptors(current); { unsigned int tmpflags; unsigned long flag; /* * IA32 `eflags' is not part of `pt_regs', it's in an ar register which * is part of the thread context. Fortunately, we are executing in the * IA32 process's context. 
*/ err |= __get_user(tmpflags, &sc->eflags); flag = ia64_getreg(_IA64_REG_AR_EFLAG); flag &= ~0x40DD5; flag |= (tmpflags & 0x40DD5); ia64_setreg(_IA64_REG_AR_EFLAG, flag); regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */ } { struct _fpstate_ia32 __user *buf = NULL; u32 fpstate_ptr; err |= get_user(fpstate_ptr, &(sc->fpstate)); buf = compat_ptr(fpstate_ptr); if (buf) { err |= restore_ia32_fpstate_live(buf); } } #if 0 { struct _fpstate * buf; err |= __get_user(buf, &sc->fpstate); if (buf) { if (!access_ok(VERIFY_READ, buf, sizeof(*buf))) goto badframe; err |= restore_i387(buf); } } #endif err |= __get_user(*peax, &sc->eax); return err; #if 0 badframe: return 1; #endif } /* * Determine which stack to use.. */ static inline void __user * get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) { unsigned long esp; /* Default to using normal stack (truncate off sign-extension of bit 31: */ esp = (unsigned int) regs->r12; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (!on_sig_stack(esp)) esp = current->sas_ss_sp + current->sas_ss_size; } /* Legacy stack switching not supported */ esp -= frame_size; /* Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ esp = ((esp + 4) & -16ul) - 4; return (void __user *) esp; } static int setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) { struct exec_domain *ed = current_thread_info()->exec_domain; struct sigframe_ia32 __user *frame; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user((ed && ed->signal_invmap && sig < 32 ? 
(int)(ed->signal_invmap[sig]) : sig), &frame->sig); err |= setup_sigcontext_ia32(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (_COMPAT_NSIG_WORDS > 1) err |= __copy_to_user(frame->extramask, (char *) &set->sig + 4, sizeof(frame->extramask)); /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { unsigned int restorer = IA32_SA_RESTORER(ka); err |= __put_user(restorer, &frame->pretcode); } else { /* Pointing to restorer in ia32 gate page */ err |= __put_user(IA32_GATE_OFFSET, &frame->pretcode); } /* This is popl %eax ; movl $,%eax ; int $0x80 * and there for historical reasons only. * See arch/i386/kernel/signal.c */ err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); err |= __put_user(__IA32_NR_sigreturn, (int __user *)(frame->retcode+2)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->r12 = (unsigned long) frame; regs->cr_iip = IA32_SA_HANDLER(ka); set_fs(USER_DS); #if 0 regs->eflags &= ~TF_MASK; #endif #if 0 printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n", current->comm, current->pid, sig, (void *) frame, regs->cr_iip, frame->pretcode); #endif return 1; give_sigsegv: force_sigsegv(sig, current); return 0; } static int setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { struct exec_domain *ed = current_thread_info()->exec_domain; compat_uptr_t pinfo, puc; struct rt_sigframe_ia32 __user *frame; int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user((ed && ed->signal_invmap && sig < 32 ? 
ed->signal_invmap[sig] : sig), &frame->sig); pinfo = (long __user) &frame->info; puc = (long __user) &frame->uc; err |= __put_user(pinfo, &frame->pinfo); err |= __put_user(puc, &frame->puc); err |= copy_siginfo_to_user32(&frame->info, info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->r12), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext_ia32(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { unsigned int restorer = IA32_SA_RESTORER(ka); err |= __put_user(restorer, &frame->pretcode); } else { /* Pointing to rt_restorer in ia32 gate page */ err |= __put_user(IA32_GATE_OFFSET + 8, &frame->pretcode); } /* This is movl $,%eax ; int $0x80 * and there for historical reasons only. 
* See arch/i386/kernel/signal.c */ err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); err |= __put_user(__IA32_NR_rt_sigreturn, (int __user *)(frame->retcode+1)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->r12 = (unsigned long) frame; regs->cr_iip = IA32_SA_HANDLER(ka); set_fs(USER_DS); #if 0 regs->eflags &= ~TF_MASK; #endif #if 0 printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n", current->comm, current->pid, (void *) frame, regs->cr_iip, frame->pretcode); #endif return 1; give_sigsegv: force_sigsegv(sig, current); return 0; } int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) return setup_rt_frame_ia32(sig, ka, info, set, regs); else return setup_frame_ia32(sig, ka, set, regs); } asmlinkage long sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7, struct pt_regs regs) { unsigned long esp = (unsigned int) regs.r12; struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8); sigset_t set; int eax; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask) || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((char *) &set.sig + 4, &frame->extramask, sizeof(frame->extramask)))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext_ia32(&regs, &frame->sc, &eax)) goto badframe; return eax; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage long sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7, struct pt_regs regs) { unsigned long esp = (unsigned int) regs.r12; struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 
__user *)(esp - 4); sigset_t set; int eax; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext_ia32(&regs, &frame->uc.uc_mcontext, &eax)) goto badframe; /* It is more difficult to avoid calling this function than to call it and ignore errors. */ do_sigaltstack((stack_t __user *) &frame->uc.uc_stack, NULL, esp); return eax; badframe: force_sig(SIGSEGV, current); return 0; }
gpl-2.0
loongson-community/preempt-rt-linux
net/ipv6/esp6.c
60
14113
/* * Copyright (C)2002 USAGI/WIDE Project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors * * Mitsuru KANDA @USAGI : IPv6 Support * Kazunori MIYAZAWA @USAGI : * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * * This file is derived from net/ipv4/esp.c */ #include <crypto/aead.h> #include <crypto/authenc.h> #include <linux/err.h> #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/esp.h> #include <linux/scatterlist.h> #include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/icmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <linux/icmpv6.h> struct esp_skb_cb { struct xfrm_skb_cb xfrm; void *tmp; }; #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) /* * Allocate an AEAD request structure with extra space for SG and IV. * * For alignment considerations the IV is placed at the front, followed * by the request and finally the SG list. * * TODO: Use spare space in skb for this where possible. 
*/ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags) { unsigned int len; len = crypto_aead_ivsize(aead); if (len) { len += crypto_aead_alignmask(aead) & ~(crypto_tfm_ctx_alignment() - 1); len = ALIGN(len, crypto_tfm_ctx_alignment()); } len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead); len = ALIGN(len, __alignof__(struct scatterlist)); len += sizeof(struct scatterlist) * nfrags; return kmalloc(len, GFP_ATOMIC); } static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp) { return crypto_aead_ivsize(aead) ? PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp; } static inline struct aead_givcrypt_request *esp_tmp_givreq( struct crypto_aead *aead, u8 *iv) { struct aead_givcrypt_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_givcrypt_set_tfm(req, aead); return req; } static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) { struct aead_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_request_set_tfm(req, aead); return req; } static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, struct aead_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static inline struct scatterlist *esp_givreq_sg( struct crypto_aead *aead, struct aead_givcrypt_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; kfree(ESP_SKB_CB(skb)->tmp); xfrm_output_resume(skb, err); } static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; struct ip_esp_hdr *esph; struct crypto_aead *aead; struct aead_givcrypt_request *req; struct scatterlist *sg; struct scatterlist *asg; struct sk_buff *trailer; void *tmp; int blksize; int clen; int 
alen; int nfrags; u8 *iv; u8 *tail; struct esp_data *esp = x->data; /* skb is pure payload to encrypt */ err = -ENOMEM; /* Round to block size */ clen = skb->len; aead = esp->aead; alen = crypto_aead_authsize(aead); blksize = ALIGN(crypto_aead_blocksize(aead), 4); clen = ALIGN(clen + 2, blksize); if (esp->padlen) clen = ALIGN(clen, esp->padlen); if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0) goto error; nfrags = err; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto error; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_givreq(aead, iv); asg = esp_givreq_sg(aead, req); sg = asg + 1; /* Fill padding... */ tail = skb_tail_pointer(trailer); do { int i; for (i=0; i<clen-skb->len - 2; i++) tail[i] = i + 1; } while (0); tail[clen-skb->len - 2] = (clen - skb->len) - 2; tail[clen - skb->len - 1] = *skb_mac_header(skb); pskb_put(skb, trailer, clen - skb->len + alen); skb_push(skb, -skb_network_offset(skb)); esph = ip_esp_hdr(skb); *skb_mac_header(skb) = IPPROTO_ESP; esph->spi = x->id.spi; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, esph->enc_data + crypto_aead_ivsize(aead) - skb->data, clen + alen); sg_init_one(asg, esph, sizeof(*esph)); aead_givcrypt_set_callback(req, 0, esp_output_done, skb); aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, sizeof(*esph)); aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq.output); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); if (err == -EINPROGRESS) goto error; if (err == -EBUSY) err = NET_XMIT_DROP; kfree(tmp); error: return err; } static int esp_input_done2(struct sk_buff *skb, int err) { struct xfrm_state *x = xfrm_input_state(skb); struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; int alen = crypto_aead_authsize(aead); int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); int elen = skb->len - hlen; int hdr_len = skb_network_header_len(skb); int padlen; u8 
nexthdr[2]; kfree(ESP_SKB_CB(skb)->tmp); if (unlikely(err)) goto out; if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2)) BUG(); err = -EINVAL; padlen = nexthdr[0]; if (padlen + 2 + alen >= elen) { LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage " "padlen=%d, elen=%d\n", padlen + 2, elen - alen); goto out; } /* ... check padding bits here. Silly. :-) */ pskb_trim(skb, skb->len - alen - padlen - 2); __skb_pull(skb, hlen); skb_set_transport_header(skb, -hdr_len); err = nexthdr[1]; /* RFC4303: Drop dummy packets without any error */ if (err == IPPROTO_NONE) err = -EINVAL; out: return err; } static void esp_input_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; xfrm_input_resume(skb, esp_input_done2(skb, err)); } static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; int ret = 0; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) { ret = -EINVAL; goto out; } if (elen <= 0) { ret = -EINVAL; goto out; } if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) { ret = -EINVAL; goto out; } ret = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. 
*/ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) goto out; ret = esp_input_done2(skb, ret); out: return ret; } static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); u32 align = max_t(u32, blksize, esp->padlen); u32 rem; mtu -= x->props.header_len + crypto_aead_authsize(esp->aead); rem = mtu & (align - 1); mtu &= ~(align - 1); if (x->props.mode != XFRM_MODE_TUNNEL) { u32 padsize = ((blksize - 1) & 7) + 1; mtu -= blksize - padsize; mtu += min_t(u32, blksize - padsize, rem); } return mtu - 2; } static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); struct xfrm_state *x; if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG) return; x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); if (!x) return; printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", ntohl(esph->spi), &iph->daddr); xfrm_state_put(x); } static void esp6_destroy(struct xfrm_state *x) { struct esp_data *esp = x->data; if (!esp) return; crypto_free_aead(esp->aead); kfree(esp); } static int esp_init_aead(struct xfrm_state *x) { struct esp_data *esp = x->data; struct crypto_aead *aead; int err; aead = crypto_alloc_aead(x->aead->alg_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; esp->aead = aead; err = crypto_aead_setkey(aead, x->aead->alg_key, (x->aead->alg_key_len + 7) / 8); if (err) goto error; err = 
crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8); if (err) goto error; error: return err; } static int esp_init_authenc(struct xfrm_state *x) { struct esp_data *esp = x->data; struct crypto_aead *aead; struct crypto_authenc_key_param *param; struct rtattr *rta; char *key; char *p; char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; err = -EINVAL; if (x->ealg == NULL) goto error; err = -ENAMETOOLONG; if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; aead = crypto_alloc_aead(authenc_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; esp->aead = aead; keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) + (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param)); err = -ENOMEM; key = kmalloc(keylen, GFP_KERNEL); if (!key) goto error; p = key; rta = (void *)p; rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; rta->rta_len = RTA_LENGTH(sizeof(*param)); param = RTA_DATA(rta); p += RTA_SPACE(sizeof(*param)); if (x->aalg) { struct xfrm_algo_desc *aalg_desc; memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8); p += (x->aalg->alg_key_len + 7) / 8; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); err = -EINVAL; if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_aead_authsize(aead)) { NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_aead_authsize(aead), aalg_desc->uinfo.auth.icv_fullbits/8); goto free_key; } err = crypto_aead_setauthsize( aead, x->aalg->alg_trunc_len / 8); if (err) goto free_key; } param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8); err = crypto_aead_setkey(aead, key, keylen); free_key: kfree(key); error: return err; } static int esp6_init_state(struct xfrm_state *x) { struct esp_data *esp; struct crypto_aead *aead; u32 align; int err; if (x->encap) return -EINVAL; esp = 
kzalloc(sizeof(*esp), GFP_KERNEL); if (esp == NULL) return -ENOMEM; x->data = esp; if (x->aead) err = esp_init_aead(x); else err = esp_init_authenc(x); if (err) goto error; aead = esp->aead; esp->padlen = 0; x->props.header_len = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); switch (x->props.mode) { case XFRM_MODE_BEET: if (x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN + (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); break; case XFRM_MODE_TRANSPORT: break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct ipv6hdr); break; default: goto error; } align = ALIGN(crypto_aead_blocksize(aead), 4); if (esp->padlen) align = max_t(u32, align, esp->padlen); x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead); error: return err; } static const struct xfrm_type esp6_type = { .description = "ESP6", .owner = THIS_MODULE, .proto = IPPROTO_ESP, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp6_init_state, .destructor = esp6_destroy, .get_mtu = esp6_get_mtu, .input = esp6_input, .output = esp6_output, .hdr_offset = xfrm6_find_1stfragopt, }; static const struct inet6_protocol esp6_protocol = { .handler = xfrm6_rcv, .err_handler = esp6_err, .flags = INET6_PROTO_NOPOLICY, }; static int __init esp6_init(void) { if (xfrm_register_type(&esp6_type, AF_INET6) < 0) { printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n"); return -EAGAIN; } if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) { printk(KERN_INFO "ipv6 esp init: can't add protocol\n"); xfrm_unregister_type(&esp6_type, AF_INET6); return -EAGAIN; } return 0; } static void __exit esp6_fini(void) { if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0) printk(KERN_INFO "ipv6 esp close: can't remove protocol\n"); if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0) printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n"); } module_init(esp6_init); module_exit(esp6_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
gpl-2.0
pitah81/android_kernel_elephone_p8000
kernel/pid.c
60
15181
/* * Generic pidhash and scalable, time-bounded PID allocator * * (C) 2002-2003 Nadia Yvette Chambers, IBM * (C) 2004 Nadia Yvette Chambers, Oracle * (C) 2002-2004 Ingo Molnar, Red Hat * * pid-structures are backing objects for tasks sharing a given ID to chain * against. There is very little to them aside from hashing them and * parking tasks using given ID's on a list. * * The hash is always changed with the tasklist_lock write-acquired, * and the hash is only accessed with the tasklist_lock at least * read-acquired, so there's no additional SMP locking needed here. * * We have a list of bitmap pages, which bitmaps represent the PID space. * Allocating and freeing PIDs is completely lockless. The worst-case * allocation scenario when all but one out of 1 million PIDs possible are * allocated already: the scanning of 32 list entries and at most PAGE_SIZE * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). * * Pid namespaces: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. 
* (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/bootmem.h> #include <linux/hash.h> #include <linux/pid_namespace.h> #include <linux/init_task.h> #include <linux/syscalls.h> #include <linux/proc_ns.h> #include <linux/proc_fs.h> #define pid_hashfn(nr, ns) \ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) static struct hlist_head *pid_hash; static unsigned int pidhash_shift = 4; struct pid init_struct_pid = INIT_STRUCT_PID; int pid_max = PID_MAX_DEFAULT; #define RESERVED_PIDS 300 int pid_max_min = RESERVED_PIDS + 1; int pid_max_max = PID_MAX_LIMIT; static inline int mk_pid(struct pid_namespace *pid_ns, struct pidmap *map, int off) { return (map - pid_ns->pidmap)*BITS_PER_PAGE + off; } #define find_next_offset(map, off) \ find_next_zero_bit((map)->page, BITS_PER_PAGE, off) /* * PID-map pages start out as NULL, they get allocated upon * first use and are never deallocated. This way a low pid_max * value does not cause lots of bitmaps to be allocated, but * the scheme scales to up to 4 million PIDs, runtime. */ struct pid_namespace init_pid_ns = { .kref = { .refcount = ATOMIC_INIT(2), }, .pidmap = { [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } }, .last_pid = 0, .level = 0, .child_reaper = &init_task, .user_ns = &init_user_ns, .proc_inum = PROC_PID_INIT_INO, }; EXPORT_SYMBOL_GPL(init_pid_ns); /* * Note: disable interrupts while the pidmap_lock is held as an * interrupt might come in and do read_lock(&tasklist_lock). 
* * If we don't disable interrupts there is a nasty deadlock between * detach_pid()->free_pid() and another cpu that does * spin_lock(&pidmap_lock) followed by an interrupt routine that does * read_lock(&tasklist_lock); * * After we clean up the tasklist_lock and know there are no * irq handlers that take it we can leave the interrupts enabled. * For now it is easier to be safe than to prove it can't happen. */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); static void free_pidmap(struct upid *upid) { int nr = upid->nr; struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE; int offset = nr & BITS_PER_PAGE_MASK; clear_bit(offset, map->page); atomic_inc(&map->nr_free); } /* * If we started walking pids at 'base', is 'a' seen before 'b'? */ static int pid_before(int base, int a, int b) { /* * This is the same as saying * * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT * and that mapping orders 'a' and 'b' with respect to 'base'. */ return (unsigned)(a - base) < (unsigned)(b - base); } /* * We might be racing with someone else trying to set pid_ns->last_pid * at the pid allocation time (there's also a sysctl for this, but racing * with this one is OK, see comment in kernel/pid_namespace.c about it). * We want the winner to have the "later" value, because if the * "earlier" value prevails, then a pid may get reused immediately. * * Since pids rollover, it is not sufficient to just pick the bigger * value. We have to consider where we started counting from. * * 'base' is the value of pid_ns->last_pid that we observed when * we started looking for a pid. * * 'pid' is the pid that we eventually found. 
*/ static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid) { int prev; int last_write = base; do { prev = last_write; last_write = cmpxchg(&pid_ns->last_pid, prev, pid); } while ((prev != last_write) && (pid_before(base, last_write, pid))); } static int alloc_pidmap(struct pid_namespace *pid_ns) { int i, offset, max_scan, pid, last = pid_ns->last_pid; struct pidmap *map; pid = last + 1; if (pid >= pid_max) pid = RESERVED_PIDS; offset = pid & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[pid/BITS_PER_PAGE]; /* * If last_pid points into the middle of the map->page we * want to scan this bitmap block twice, the second time * we start with offset == 0 (or RESERVED_PIDS). */ max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset; for (i = 0; i <= max_scan; ++i) { if (unlikely(!map->page)) { void *page = kzalloc(PAGE_SIZE, GFP_KERNEL); /* * Free the page if someone raced with us * installing it: */ spin_lock_irq(&pidmap_lock); if (!map->page) { map->page = page; page = NULL; } spin_unlock_irq(&pidmap_lock); kfree(page); if (unlikely(!map->page)) break; } if (likely(atomic_read(&map->nr_free))) { for ( ; ; ) { if (!test_and_set_bit(offset, map->page)) { atomic_dec(&map->nr_free); set_last_pid(pid_ns, last, pid); return pid; } offset = find_next_offset(map, offset); if (offset >= BITS_PER_PAGE) break; pid = mk_pid(pid_ns, map, offset); if (pid >= pid_max) break; } } if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) { ++map; offset = 0; } else { map = &pid_ns->pidmap[0]; offset = RESERVED_PIDS; if (unlikely(last == offset)) break; } pid = mk_pid(pid_ns, map, offset); } return -1; } int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) { int offset; struct pidmap *map, *end; if (last >= PID_MAX_LIMIT) return -1; offset = (last + 1) & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; end = &pid_ns->pidmap[PIDMAP_ENTRIES]; for (; map < end; map++, offset = 0) { if (unlikely(!map->page)) continue; offset = 
find_next_bit((map)->page, BITS_PER_PAGE, offset); if (offset < BITS_PER_PAGE) return mk_pid(pid_ns, map, offset); } return -1; } void put_pid(struct pid *pid) { struct pid_namespace *ns; if (!pid) return; ns = pid->numbers[pid->level].ns; if ((atomic_read(&pid->count) == 1) || atomic_dec_and_test(&pid->count)) { kmem_cache_free(ns->pid_cachep, pid); put_pid_ns(ns); } } EXPORT_SYMBOL_GPL(put_pid); static void delayed_put_pid(struct rcu_head *rhp) { struct pid *pid = container_of(rhp, struct pid, rcu); put_pid(pid); } void free_pid(struct pid *pid) { /* We can be called with write_lock_irq(&tasklist_lock) held */ int i; unsigned long flags; spin_lock_irqsave(&pidmap_lock, flags); for (i = 0; i <= pid->level; i++) { struct upid *upid = pid->numbers + i; struct pid_namespace *ns = upid->ns; hlist_del_rcu(&upid->pid_chain); switch(--ns->nr_hashed) { case 2: case 1: /* When all that is left in the pid namespace * is the reaper wake up the reaper. The reaper * may be sleeping in zap_pid_ns_processes(). 
*/ wake_up_process(ns->child_reaper); break; case 0: schedule_work(&ns->proc_work); break; } } spin_unlock_irqrestore(&pidmap_lock, flags); for (i = 0; i <= pid->level; i++) free_pidmap(pid->numbers + i); call_rcu(&pid->rcu, delayed_put_pid); } struct pid *alloc_pid(struct pid_namespace *ns) { struct pid *pid; enum pid_type type; int i, nr; struct pid_namespace *tmp; struct upid *upid; pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); if (!pid) goto out; tmp = ns; pid->level = ns->level; for (i = ns->level; i >= 0; i--) { nr = alloc_pidmap(tmp); if (nr < 0) goto out_free; pid->numbers[i].nr = nr; pid->numbers[i].ns = tmp; tmp = tmp->parent; } if (unlikely(is_child_reaper(pid))) { if (pid_ns_prepare_proc(ns)) goto out_free; } get_pid_ns(ns); atomic_set(&pid->count, 1); for (type = 0; type < PIDTYPE_MAX; ++type) INIT_HLIST_HEAD(&pid->tasks[type]); upid = pid->numbers + ns->level; spin_lock_irq(&pidmap_lock); if (!(ns->nr_hashed & PIDNS_HASH_ADDING)) goto out_unlock; for ( ; upid >= pid->numbers; --upid) { hlist_add_head_rcu(&upid->pid_chain, &pid_hash[pid_hashfn(upid->nr, upid->ns)]); upid->ns->nr_hashed++; } spin_unlock_irq(&pidmap_lock); out: return pid; out_unlock: spin_unlock_irq(&pidmap_lock); put_pid_ns(ns); out_free: while (++i <= ns->level) free_pidmap(pid->numbers + i); kmem_cache_free(ns->pid_cachep, pid); pid = NULL; goto out; } void disable_pid_allocation(struct pid_namespace *ns) { spin_lock_irq(&pidmap_lock); ns->nr_hashed &= ~PIDNS_HASH_ADDING; spin_unlock_irq(&pidmap_lock); } struct pid *find_pid_ns(int nr, struct pid_namespace *ns) { struct upid *pnr; hlist_for_each_entry_rcu(pnr, &pid_hash[pid_hashfn(nr, ns)], pid_chain) if (pnr->nr == nr && pnr->ns == ns) return container_of(pnr, struct pid, numbers[ns->level]); return NULL; } EXPORT_SYMBOL_GPL(find_pid_ns); struct pid *find_vpid(int nr) { return find_pid_ns(nr, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(find_vpid); /* * attach_pid() must be called with the tasklist_lock write-held. 
*/ void attach_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { struct pid_link *link; link = &task->pids[type]; link->pid = pid; hlist_add_head_rcu(&link->node, &pid->tasks[type]); } static void __change_pid(struct task_struct *task, enum pid_type type, struct pid *new) { struct pid_link *link; struct pid *pid; int tmp; link = &task->pids[type]; pid = link->pid; hlist_del_rcu(&link->node); link->pid = new; for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (!hlist_empty(&pid->tasks[tmp])) return; free_pid(pid); } void detach_pid(struct task_struct *task, enum pid_type type) { __change_pid(task, type, NULL); } void change_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { __change_pid(task, type, pid); attach_pid(task, type, pid); } /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type type) { new->pids[type].pid = old->pids[type].pid; hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); } struct task_struct *pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; if (pid) { struct hlist_node *first; first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), lockdep_tasklist_lock_is_held()); if (first) result = hlist_entry(first, struct task_struct, pids[(type)].node); } return result; } EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock(). 
*/ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { rcu_lockdep_assert(rcu_read_lock_held(), "find_task_by_pid_ns() needs rcu_read_lock()" " protection"); return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } EXPORT_SYMBOL_GPL(find_task_by_pid_ns); struct task_struct *find_task_by_vpid(pid_t vnr) { return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); } struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; rcu_read_lock(); if (type != PIDTYPE_PID) task = task->group_leader; pid = get_pid(task->pids[type].pid); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(get_task_pid); struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result; rcu_read_lock(); result = pid_task(pid, type); if (result) get_task_struct(result); rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(get_pid_task); struct pid *find_get_pid(pid_t nr) { struct pid *pid; rcu_read_lock(); pid = get_pid(find_vpid(nr)); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(find_get_pid); pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) { struct upid *upid; pid_t nr = 0; if (pid && ns->level <= pid->level) { upid = &pid->numbers[ns->level]; if (upid->ns == ns) nr = upid->nr; } return nr; } EXPORT_SYMBOL_GPL(pid_nr_ns); pid_t pid_vnr(struct pid *pid) { return pid_nr_ns(pid, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(pid_vnr); pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns) { pid_t nr = 0; rcu_read_lock(); if (!ns) ns = task_active_pid_ns(current); if (likely(pid_alive(task))) { if (type != PIDTYPE_PID) task = task->group_leader; nr = pid_nr_ns(task->pids[type].pid, ns); } rcu_read_unlock(); return nr; } EXPORT_SYMBOL(__task_pid_nr_ns); pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return pid_nr_ns(task_tgid(tsk), ns); } EXPORT_SYMBOL(task_tgid_nr_ns); struct pid_namespace *task_active_pid_ns(struct 
task_struct *tsk) { return ns_of_pid(task_pid(tsk)); } EXPORT_SYMBOL_GPL(task_active_pid_ns); /* * Used by proc to find the first pid that is greater than or equal to nr. * * If there is a pid at nr this function is exactly the same as find_pid_ns. */ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) { struct pid *pid; do { pid = find_pid_ns(nr, ns); if (pid) break; nr = next_pidmap(ns, nr); } while (nr > 0); return pid; } /* * The pid hash table is scaled according to the amount of memory in the * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or * more. */ void __init pidhash_init(void) { unsigned int i, pidhash_size; pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18, HASH_EARLY | HASH_SMALL, &pidhash_shift, NULL, 0, 4096); pidhash_size = 1U << pidhash_shift; for (i = 0; i < pidhash_size; i++) INIT_HLIST_HEAD(&pid_hash[i]); } void __init pidmap_init(void) { /* Veryify no one has done anything silly */ BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING); /* bump default and minimum pid_max based on number of cpus */ pid_max = min(pid_max_max, max_t(int, pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pid_max_min = max_t(int, pid_max_min, PIDS_PER_CPU_MIN * num_possible_cpus()); pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min); init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); /* Reserve PID 0. We never call free_pidmap(0) */ set_bit(0, init_pid_ns.pidmap[0].page); atomic_dec(&init_pid_ns.pidmap[0].nr_free); init_pid_ns.nr_hashed = PIDNS_HASH_ADDING; init_pid_ns.pid_cachep = KMEM_CACHE(pid, SLAB_HWCACHE_ALIGN | SLAB_PANIC); }
gpl-2.0
mikedanese/linux
drivers/clk/mediatek/reset.c
316
2387
/* * Copyright (c) 2014 MediaTek Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset-controller.h> #include <linux/slab.h> #include "clk-mtk.h" struct mtk_reset { struct regmap *regmap; int regofs; struct reset_controller_dev rcdev; }; static int mtk_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) { struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2), BIT(id % 32), ~0); } static int mtk_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) { struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2), BIT(id % 32), 0); } static int mtk_reset(struct reset_controller_dev *rcdev, unsigned long id) { int ret; ret = mtk_reset_assert(rcdev, id); if (ret) return ret; return mtk_reset_deassert(rcdev, id); } static const struct reset_control_ops mtk_reset_ops = { .assert = mtk_reset_assert, .deassert = mtk_reset_deassert, .reset = mtk_reset, }; void mtk_register_reset_controller(struct device_node *np, unsigned int num_regs, int regofs) { struct mtk_reset *data; int ret; struct regmap *regmap; regmap = syscon_node_to_regmap(np); if (IS_ERR(regmap)) { pr_err("Cannot find regmap for %s: %ld\n", np->full_name, PTR_ERR(regmap)); return; } data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return; data->regmap = regmap; data->regofs = 
regofs; data->rcdev.owner = THIS_MODULE; data->rcdev.nr_resets = num_regs * 32; data->rcdev.ops = &mtk_reset_ops; data->rcdev.of_node = np; ret = reset_controller_register(&data->rcdev); if (ret) { pr_err("could not register reset controller: %d\n", ret); kfree(data); return; } }
gpl-2.0
Chad0989/incredikernel
drivers/gpu/drm/drm_pci.c
316
3700
/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */ /** * \file drm_pci.c * \brief Functions and ioctls to manage PCI memory * * \warning These interfaces aren't stable yet. * * \todo Implement the remaining ioctl's for the PCI pools. * \todo The wrappers here are so thin that they would be better off inlined.. * * \author José Fonseca <jrfonseca@tungstengraphics.com> * \author Leif Delgass <ldelgass@retinalburn.net> */ /* * Copyright 2003 José Fonseca. * Copyright 2003 Leif Delgass. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/pci.h> #include <linux/dma-mapping.h> #include "drmP.h" /**********************************************************************/ /** \name PCI memory */ /*@{*/ /** * \brief Allocate a PCI consistent memory block, for DMA. 
*/ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) { drm_dma_handle_t *dmah; #if 1 unsigned long addr; size_t sz; #endif /* pci_alloc_consistent only guarantees alignment to the smallest * PAGE_SIZE order which is greater than or equal to the requested size. * Return NULL here for now to make sure nobody tries for larger alignment */ if (align > size) return NULL; dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); if (!dmah) return NULL; dmah->size = size; dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP); if (dmah->vaddr == NULL) { kfree(dmah); return NULL; } memset(dmah->vaddr, 0, size); /* XXX - Is virt_to_page() legal for consistent mem? */ /* Reserve */ for (addr = (unsigned long)dmah->vaddr, sz = size; sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { SetPageReserved(virt_to_page(addr)); } return dmah; } EXPORT_SYMBOL(drm_pci_alloc); /** * \brief Free a PCI consistent memory block without freeing its descriptor. * * This function is for internal use in the Linux-specific DRM core code. */ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) { #if 1 unsigned long addr; size_t sz; #endif if (dmah->vaddr) { /* XXX - Is virt_to_page() legal for consistent mem? */ /* Unreserve */ for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); } dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, dmah->busaddr); } } /** * \brief Free a PCI consistent memory block */ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) { __drm_pci_free(dev, dmah); kfree(dmah); } EXPORT_SYMBOL(drm_pci_free); /*@}*/
gpl-2.0
Lime1iME/Testing
sound/soc/msm/lpass-dma.c
1340
11575
/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/android_pmem.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/msm_audio.h> #include <linux/clk.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <mach/msm_iomap-8x60.h> #include <mach/audio_dma_msm8k.h> #include <sound/dai.h> #include "lpass-pcm.h" struct dai_baseinfo { void __iomem *base; }; static struct dai_baseinfo dai_info; struct dai_drv { u8 *buffer; u32 buffer_phys; int channels; irqreturn_t (*callback) (int intrsrc, void *private_data); void *private_data; int in_use; u32 buffer_len; u32 period_len; u32 master_mode; }; static struct dai_drv *dai[MAX_CHANNELS]; static spinlock_t dai_lock; static int dai_find_dma_channel(uint32_t intrsrc) { int i, dma_channel = 0; pr_debug("%s\n", __func__); for (i = 0; i <= 27; i += 3) { if (intrsrc & (1 << i)) { dma_channel = i / 3; break; } } return dma_channel; } void register_dma_irq_handler(int dma_ch, irqreturn_t (*callback) (int intrsrc, void *private_data), void *private_data) { pr_debug("%s\n", __func__); dai[dma_ch]->callback = callback; dai[dma_ch]->private_data = private_data; } void unregister_dma_irq_handler(int dma_ch) { pr_debug("%s\n", __func__); dai[dma_ch]->callback = NULL; dai[dma_ch]->private_data = 
NULL; } static irqreturn_t dai_irq_handler(int irq, void *data) { unsigned long flag; uint32_t intrsrc; uint32_t dma_ch = 0; irqreturn_t ret = IRQ_HANDLED; pr_debug("%s\n", __func__); spin_lock_irqsave(&dai_lock, flag); intrsrc = readl(dai_info.base + LPAIF_IRQ_STAT(0)); writel(intrsrc, dai_info.base + LPAIF_IRQ_CLEAR(0)); mb(); while (intrsrc) { dma_ch = dai_find_dma_channel(intrsrc); if (!dai[dma_ch]->callback) goto handled; if (!dai[dma_ch]->private_data) goto handled; ret = dai[dma_ch]->callback(intrsrc, dai[dma_ch]->private_data); intrsrc &= ~(0x7 << (dma_ch * 3)); } handled: spin_unlock_irqrestore(&dai_lock, flag); return ret; } void dai_print_state(uint32_t dma_ch) { int i = 0; unsigned long *ptrmem = (unsigned long *)dai_info.base; for (i = 0; i < 4; i++, ++ptrmem) pr_debug("[0x%08x]=0x%08x\n", (unsigned int)ptrmem, (unsigned int)*ptrmem); ptrmem = (unsigned long *)(dai_info.base + DMA_CH_CTL_BASE + DMA_CH_INDEX(dma_ch)); for (i = 0; i < 10; i++, ++ptrmem) pr_debug("[0x%08x]=0x%08x\n", (unsigned int)ptrmem, (unsigned int) *ptrmem); } static int dai_enable_irq(uint32_t dma_ch) { int ret; pr_debug("%s\n", __func__); ret = request_irq(LPASS_SCSS_AUDIO_IF_OUT0_IRQ, dai_irq_handler, IRQF_TRIGGER_RISING | IRQF_SHARED, "msm-i2s", (void *) (dma_ch+1)); if (ret < 0) { pr_debug("Request Irq Failed err = %d\n", ret); return ret; } return ret; } static void dai_config_dma(uint32_t dma_ch) { pr_debug("%s dma_ch = %u\n", __func__, dma_ch); writel(dai[dma_ch]->buffer_phys, dai_info.base + LPAIF_DMA_BASE(dma_ch)); writel(((dai[dma_ch]->buffer_len >> 2) - 1), dai_info.base + LPAIF_DMA_BUFF_LEN(dma_ch)); writel(((dai[dma_ch]->period_len >> 2) - 1), dai_info.base + LPAIF_DMA_PER_LEN(dma_ch)); mb(); } static void dai_enable_codec(uint32_t dma_ch, int codec) { uint32_t intrVal; uint32_t i2sctl; pr_debug("%s\n", __func__); intrVal = readl(dai_info.base + LPAIF_IRQ_EN(0)); intrVal = intrVal | (7 << (dma_ch * 3)); writel(intrVal, dai_info.base + LPAIF_IRQ_EN(0)); if (codec == 
DAI_SPKR) { writel(0x0813, dai_info.base + LPAIF_DMA_CTL(dma_ch)); i2sctl = 0x4400; i2sctl |= (dai[dma_ch]->master_mode ? WS_SRC_INT : WS_SRC_EXT); writel(i2sctl, dai_info.base + LPAIF_I2S_CTL_OFFSET(DAI_SPKR)); } else if (codec == DAI_MIC) { writel(0x81b, dai_info.base + LPAIF_DMA_CTL(dma_ch)); i2sctl = 0x0110; i2sctl |= (dai[dma_ch]->master_mode ? WS_SRC_INT : WS_SRC_EXT); writel(i2sctl, dai_info.base + LPAIF_I2S_CTL_OFFSET(DAI_MIC)); } } static void dai_disable_codec(uint32_t dma_ch, int codec) { uint32_t intrVal = 0; uint32_t intrVal1 = 0; unsigned long flag = 0x0; pr_debug("%s\n", __func__); spin_lock_irqsave(&dai_lock, flag); intrVal1 = readl(dai_info.base + LPAIF_I2S_CTL_OFFSET(codec)); if (codec == DAI_SPKR) intrVal1 = intrVal1 & ~(1 << 14); else if (codec == DAI_MIC) intrVal1 = intrVal1 & ~(1 << 8); writel(intrVal1, dai_info.base + LPAIF_I2S_CTL_OFFSET(codec)); intrVal = 0x0; writel(intrVal, dai_info.base + LPAIF_DMA_CTL(dma_ch)); spin_unlock_irqrestore(&dai_lock, flag); } int dai_open(uint32_t dma_ch) { pr_debug("%s\n", __func__); if (!dai_info.base) { pr_debug("%s failed as no msm-dai device\n", __func__); return -ENODEV; } if (dma_ch >= MAX_CHANNELS) { pr_debug("%s over max channesl %d\n", __func__, dma_ch); return -ENODEV; } return 0; } void dai_close(uint32_t dma_ch) { pr_debug("%s\n", __func__); if ((dma_ch >= 0) && (dma_ch < 5)) dai_disable_codec(dma_ch, DAI_SPKR); else dai_disable_codec(dma_ch, DAI_MIC); free_irq(LPASS_SCSS_AUDIO_IF_OUT0_IRQ, (void *) (dma_ch + 1)); } void dai_set_master_mode(uint32_t dma_ch, int mode) { if (dma_ch < MAX_CHANNELS) dai[dma_ch]->master_mode = mode; else pr_err("%s: invalid dma channel\n", __func__); } int dai_set_params(uint32_t dma_ch, struct dai_dma_params *params) { pr_debug("%s\n", __func__); dai[dma_ch]->buffer = params->buffer; dai[dma_ch]->buffer_phys = params->src_start; dai[dma_ch]->channels = params->channels; dai[dma_ch]->buffer_len = params->buffer_size; dai[dma_ch]->period_len = params->period_size; 
mb(); dai_config_dma(dma_ch); return dma_ch; } int dai_start(uint32_t dma_ch) { unsigned long flag = 0x0; spin_lock_irqsave(&dai_lock, flag); dai_enable_irq(dma_ch); if ((dma_ch >= 0) && (dma_ch < 5)) dai_enable_codec(dma_ch, DAI_SPKR); else dai_enable_codec(dma_ch, DAI_MIC); spin_unlock_irqrestore(&dai_lock, flag); dai_print_state(dma_ch); return 0; } #define HDMI_BURST_INCR4 (1 << 11) #define HDMI_WPSCNT (1 << 8) #define HDMI_AUDIO_INTF (5 << 4) #define HDMI_FIFO_WATER_MARK (7 << 1) #define HDMI_ENABLE (1) int dai_start_hdmi(uint32_t dma_ch) { unsigned long flag = 0x0; uint32_t val; pr_debug("%s dma_ch = %u\n", __func__, dma_ch); spin_lock_irqsave(&dai_lock, flag); dai_enable_irq(dma_ch); if ((dma_ch >= 0) && (dma_ch < 5)) { val = readl(dai_info.base + LPAIF_IRQ_EN(0)); val = val | (7 << (dma_ch * 3)); writel(val, dai_info.base + LPAIF_IRQ_EN(0)); mb(); val = (HDMI_BURST_INCR4 | HDMI_WPSCNT | HDMI_AUDIO_INTF | HDMI_FIFO_WATER_MARK | HDMI_ENABLE); writel(val, dai_info.base + LPAIF_DMA_CTL(dma_ch)); } spin_unlock_irqrestore(&dai_lock, flag); mb(); dai_print_state(dma_ch); return 0; } int wait_for_dma_cnt_stop(uint32_t dma_ch) { uint32_t dma_per_cnt_reg_val, dma_per_cnt, prev_dma_per_cnt; uint32_t i; pr_info("%s dma_ch %u\n", __func__, dma_ch); dma_per_cnt_reg_val = readl_relaxed(dai_info.base + LPAIF_DMA_PER_CNT(dma_ch)); dma_per_cnt = ((LPAIF_DMA_PER_CNT_PER_CNT_MASK & dma_per_cnt_reg_val) >> LPAIF_DMA_PER_CNT_PER_CNT_SHIFT) - ((LPAIF_DMA_PER_CNT_FIFO_WORDCNT_MASK & dma_per_cnt_reg_val) >> LPAIF_DMA_PER_CNT_FIFO_WORDCNT_SHIFT); prev_dma_per_cnt = dma_per_cnt; i = 1; pr_info("%s: i = %u dma_per_cnt_reg_val 0x%08x , dma_per_cnt %u\n", __func__, i, dma_per_cnt_reg_val, dma_per_cnt); while (i <= 50) { msleep(50); dma_per_cnt_reg_val = readl_relaxed(dai_info.base + LPAIF_DMA_PER_CNT(dma_ch)); dma_per_cnt = ((LPAIF_DMA_PER_CNT_PER_CNT_MASK & dma_per_cnt_reg_val) >> LPAIF_DMA_PER_CNT_PER_CNT_SHIFT) - ((LPAIF_DMA_PER_CNT_FIFO_WORDCNT_MASK & dma_per_cnt_reg_val) >> 
LPAIF_DMA_PER_CNT_FIFO_WORDCNT_SHIFT); i++; pr_info("%s: i = %u dma_per_cnt_reg_val 0x%08x , dma_per_cnt %u\n", __func__, i, dma_per_cnt_reg_val, dma_per_cnt); if (prev_dma_per_cnt == dma_per_cnt) break; prev_dma_per_cnt = dma_per_cnt; } return 0; } void dai_stop_hdmi(uint32_t dma_ch) { unsigned long flag = 0x0; uint32_t intrVal; uint32_t int_mask = 0x00000007; pr_debug("%s dma_ch %u\n", __func__, dma_ch); spin_lock_irqsave(&dai_lock, flag); free_irq(LPASS_SCSS_AUDIO_IF_OUT0_IRQ, (void *) (dma_ch + 1)); intrVal = 0x0; writel(intrVal, dai_info.base + LPAIF_DMA_CTL(dma_ch)); mb(); intrVal = readl(dai_info.base + LPAIF_IRQ_EN(0)); int_mask = ((int_mask) << (dma_ch * 3)); int_mask = ~int_mask; intrVal = intrVal & int_mask; writel(intrVal, dai_info.base + LPAIF_IRQ_EN(0)); mb(); spin_unlock_irqrestore(&dai_lock, flag); } int dai_stop(uint32_t dma_ch) { pr_debug("%s\n", __func__); return 0; } uint32_t dai_get_dma_pos(uint32_t dma_ch) { uint32_t addr; pr_debug("%s\n", __func__); addr = readl(dai_info.base + LPAIF_DMA_CURR_ADDR(dma_ch)); return addr; } static int __devinit dai_probe(struct platform_device *pdev) { int rc = 0; int i = 0; struct resource *src; src = platform_get_resource_byname(pdev, IORESOURCE_MEM, "msm-dai"); if (!src) { rc = -ENODEV; pr_debug("%s Error rc=%d\n", __func__, rc); goto error; } for (i = 0; i <= MAX_CHANNELS; i++) { dai[i] = kzalloc(sizeof(struct dai_drv), GFP_KERNEL); if (!dai[0]) { pr_debug("Allocation failed for dma_channel = 0\n"); return -ENODEV; } } dai_info.base = ioremap(src->start, (src->end - src->start) + 1); pr_debug("%s: msm-dai: 0x%08x\n", __func__, (unsigned int)dai_info.base); spin_lock_init(&dai_lock); error: return rc; } static int dai_remove(struct platform_device *pdev) { iounmap(dai_info.base); return 0; } static struct platform_driver dai_driver = { .probe = dai_probe, .remove = dai_remove, .driver = { .name = "msm-dai", .owner = THIS_MODULE }, }; static struct resource msm_lpa_resources[] = { { .start = MSM_LPA_PHYS, 
.end = MSM_LPA_END, .flags = IORESOURCE_MEM, .name = "msm-dai", }, }; static struct platform_device *codec_device; static int msm_dai_dev_register(const char *name) { int ret = 0; pr_debug("%s : called\n", __func__); codec_device = platform_device_alloc(name, -1); if (codec_device == NULL) { pr_debug("Failed to allocate %s\n", name); return -ENODEV; } platform_set_drvdata(codec_device, (void *)&dai_info); platform_device_add_resources(codec_device, &msm_lpa_resources[0], ARRAY_SIZE(msm_lpa_resources)); ret = platform_device_add(codec_device); if (ret != 0) { pr_debug("Failed to register %s: %d\n", name, ret); platform_device_put(codec_device); } return ret; } static int __init dai_init(void) { if (msm_dai_dev_register("msm-dai")) { pr_notice("dai_init: msm-dai Failed"); return -ENODEV; } return platform_driver_register(&dai_driver); } static void __exit dai_exit(void) { platform_driver_unregister(&dai_driver); platform_device_put(codec_device); } module_init(dai_init); module_exit(dai_exit); MODULE_DESCRIPTION("MSM I2S driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
Bogdacutu/STLinux-Kernel
drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
1852
2405
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <subdev/mc.h> struct nvc0_mc_priv { struct nouveau_mc base; }; static const struct nouveau_mc_intr nvc0_mc_intr[] = { { 0x00000001, NVDEV_ENGINE_PPP }, { 0x00000020, NVDEV_ENGINE_COPY0 }, { 0x00000040, NVDEV_ENGINE_COPY1 }, { 0x00000100, NVDEV_ENGINE_FIFO }, { 0x00001000, NVDEV_ENGINE_GR }, { 0x00008000, NVDEV_ENGINE_BSP }, { 0x00040000, NVDEV_SUBDEV_THERM }, { 0x00020000, NVDEV_ENGINE_VP }, { 0x00100000, NVDEV_SUBDEV_TIMER }, { 0x00200000, NVDEV_SUBDEV_GPIO }, { 0x02000000, NVDEV_SUBDEV_LTCG }, { 0x04000000, NVDEV_ENGINE_DISP }, { 0x10000000, NVDEV_SUBDEV_BUS }, { 0x40000000, NVDEV_SUBDEV_IBUS }, { 0x80000000, NVDEV_ENGINE_SW }, {}, }; static int nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nvc0_mc_priv *priv; int ret; ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; return 0; } struct nouveau_oclass nvc0_mc_oclass = { .handle = NV_SUBDEV(MC, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_mc_ctor, .dtor = _nouveau_mc_dtor, .init = nv50_mc_init, .fini = _nouveau_mc_fini, }, };
gpl-2.0
rogrady/lin_imx6
drivers/video/backlight/adp8860_bl.c
1852
22676
/* * Backlight driver for Analog Devices ADP8860 Backlight Devices * * Copyright 2009-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/version.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/i2c/adp8860.h> #define ADP8860_EXT_FEATURES #define ADP8860_USE_LEDS #define ADP8860_MFDVID 0x00 /* Manufacturer and device ID */ #define ADP8860_MDCR 0x01 /* Device mode and status */ #define ADP8860_MDCR2 0x02 /* Device mode and Status Register 2 */ #define ADP8860_INTR_EN 0x03 /* Interrupts enable */ #define ADP8860_CFGR 0x04 /* Configuration register */ #define ADP8860_BLSEN 0x05 /* Sink enable backlight or independent */ #define ADP8860_BLOFF 0x06 /* Backlight off timeout */ #define ADP8860_BLDIM 0x07 /* Backlight dim timeout */ #define ADP8860_BLFR 0x08 /* Backlight fade in and out rates */ #define ADP8860_BLMX1 0x09 /* Backlight (Brightness Level 1-daylight) maximum current */ #define ADP8860_BLDM1 0x0A /* Backlight (Brightness Level 1-daylight) dim current */ #define ADP8860_BLMX2 0x0B /* Backlight (Brightness Level 2-office) maximum current */ #define ADP8860_BLDM2 0x0C /* Backlight (Brightness Level 2-office) dim current */ #define ADP8860_BLMX3 0x0D /* Backlight (Brightness Level 3-dark) maximum current */ #define ADP8860_BLDM3 0x0E /* Backlight (Brightness Level 3-dark) dim current */ #define ADP8860_ISCFR 0x0F /* Independent sink current fade control register */ #define ADP8860_ISCC 0x10 /* Independent sink current control register */ #define ADP8860_ISCT1 0x11 /* Independent Sink Current Timer Register LED[7:5] */ #define ADP8860_ISCT2 0x12 /* Independent Sink Current Timer Register LED[4:1] */ #define ADP8860_ISCF 0x13 /* Independent sink current fade register */ #define 
ADP8860_ISC7 0x14 /* Independent Sink Current LED7 */ #define ADP8860_ISC6 0x15 /* Independent Sink Current LED6 */ #define ADP8860_ISC5 0x16 /* Independent Sink Current LED5 */ #define ADP8860_ISC4 0x17 /* Independent Sink Current LED4 */ #define ADP8860_ISC3 0x18 /* Independent Sink Current LED3 */ #define ADP8860_ISC2 0x19 /* Independent Sink Current LED2 */ #define ADP8860_ISC1 0x1A /* Independent Sink Current LED1 */ #define ADP8860_CCFG 0x1B /* Comparator configuration */ #define ADP8860_CCFG2 0x1C /* Second comparator configuration */ #define ADP8860_L2_TRP 0x1D /* L2 comparator reference */ #define ADP8860_L2_HYS 0x1E /* L2 hysteresis */ #define ADP8860_L3_TRP 0x1F /* L3 comparator reference */ #define ADP8860_L3_HYS 0x20 /* L3 hysteresis */ #define ADP8860_PH1LEVL 0x21 /* First phototransistor ambient light level-low byte register */ #define ADP8860_PH1LEVH 0x22 /* First phototransistor ambient light level-high byte register */ #define ADP8860_PH2LEVL 0x23 /* Second phototransistor ambient light level-low byte register */ #define ADP8860_PH2LEVH 0x24 /* Second phototransistor ambient light level-high byte register */ #define ADP8860_MANUFID 0x0 /* Analog Devices ADP8860 Manufacturer ID */ #define ADP8861_MANUFID 0x4 /* Analog Devices ADP8861 Manufacturer ID */ #define ADP8863_MANUFID 0x2 /* Analog Devices ADP8863 Manufacturer ID */ #define ADP8860_DEVID(x) ((x) & 0xF) #define ADP8860_MANID(x) ((x) >> 4) /* MDCR Device mode and status */ #define INT_CFG (1 << 6) #define NSTBY (1 << 5) #define DIM_EN (1 << 4) #define GDWN_DIS (1 << 3) #define SIS_EN (1 << 2) #define CMP_AUTOEN (1 << 1) #define BLEN (1 << 0) /* ADP8860_CCFG Main ALS comparator level enable */ #define L3_EN (1 << 1) #define L2_EN (1 << 0) #define CFGR_BLV_SHIFT 3 #define CFGR_BLV_MASK 0x3 #define ADP8860_FLAG_LED_MASK 0xFF #define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4)) #define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1)) #define 
ALS_CCFG_VAL(filt) ((0x7 & filt) << 5) enum { adp8860, adp8861, adp8863 }; struct adp8860_led { struct led_classdev cdev; struct work_struct work; struct i2c_client *client; enum led_brightness new_brightness; int id; int flags; }; struct adp8860_bl { struct i2c_client *client; struct backlight_device *bl; struct adp8860_led *led; struct adp8860_backlight_platform_data *pdata; struct mutex lock; unsigned long cached_daylight_max; int id; int revid; int current_brightness; unsigned en_ambl_sens:1; unsigned gdwn_dis:1; }; static int adp8860_read(struct i2c_client *client, int reg, uint8_t *val) { int ret; ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) { dev_err(&client->dev, "failed reading at 0x%02x\n", reg); return ret; } *val = (uint8_t)ret; return 0; } static int adp8860_write(struct i2c_client *client, u8 reg, u8 val) { return i2c_smbus_write_byte_data(client, reg, val); } static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask) { struct adp8860_bl *data = i2c_get_clientdata(client); uint8_t reg_val; int ret; mutex_lock(&data->lock); ret = adp8860_read(client, reg, &reg_val); if (!ret && ((reg_val & bit_mask) == 0)) { reg_val |= bit_mask; ret = adp8860_write(client, reg, reg_val); } mutex_unlock(&data->lock); return ret; } static int adp8860_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask) { struct adp8860_bl *data = i2c_get_clientdata(client); uint8_t reg_val; int ret; mutex_lock(&data->lock); ret = adp8860_read(client, reg, &reg_val); if (!ret && (reg_val & bit_mask)) { reg_val &= ~bit_mask; ret = adp8860_write(client, reg, reg_val); } mutex_unlock(&data->lock); return ret; } /* * Independent sink / LED */ #if defined(ADP8860_USE_LEDS) static void adp8860_led_work(struct work_struct *work) { struct adp8860_led *led = container_of(work, struct adp8860_led, work); adp8860_write(led->client, ADP8860_ISC1 - led->id + 1, led->new_brightness >> 1); } static void adp8860_led_set(struct led_classdev *led_cdev, enum 
led_brightness value) { struct adp8860_led *led; led = container_of(led_cdev, struct adp8860_led, cdev); led->new_brightness = value; schedule_work(&led->work); } static int adp8860_led_setup(struct adp8860_led *led) { struct i2c_client *client = led->client; int ret = 0; ret = adp8860_write(client, ADP8860_ISC1 - led->id + 1, 0); ret |= adp8860_set_bits(client, ADP8860_ISCC, 1 << (led->id - 1)); if (led->id > 4) ret |= adp8860_set_bits(client, ADP8860_ISCT1, (led->flags & 0x3) << ((led->id - 5) * 2)); else ret |= adp8860_set_bits(client, ADP8860_ISCT2, (led->flags & 0x3) << ((led->id - 1) * 2)); return ret; } static int __devinit adp8860_led_probe(struct i2c_client *client) { struct adp8860_backlight_platform_data *pdata = client->dev.platform_data; struct adp8860_bl *data = i2c_get_clientdata(client); struct adp8860_led *led, *led_dat; struct led_info *cur_led; int ret, i; led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL); if (led == NULL) { dev_err(&client->dev, "failed to alloc memory\n"); return -ENOMEM; } ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law); ret = adp8860_write(client, ADP8860_ISCT1, (pdata->led_on_time & 0x3) << 6); ret |= adp8860_write(client, ADP8860_ISCF, FADE_VAL(pdata->led_fade_in, pdata->led_fade_out)); if (ret) { dev_err(&client->dev, "failed to write\n"); goto err_free; } for (i = 0; i < pdata->num_leds; ++i) { cur_led = &pdata->leds[i]; led_dat = &led[i]; led_dat->id = cur_led->flags & ADP8860_FLAG_LED_MASK; if (led_dat->id > 7 || led_dat->id < 1) { dev_err(&client->dev, "Invalid LED ID %d\n", led_dat->id); goto err; } if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) { dev_err(&client->dev, "LED %d used by Backlight\n", led_dat->id); goto err; } led_dat->cdev.name = cur_led->name; led_dat->cdev.default_trigger = cur_led->default_trigger; led_dat->cdev.brightness_set = adp8860_led_set; led_dat->cdev.brightness = LED_OFF; led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT; led_dat->client = client; 
led_dat->new_brightness = LED_OFF; INIT_WORK(&led_dat->work, adp8860_led_work); ret = led_classdev_register(&client->dev, &led_dat->cdev); if (ret) { dev_err(&client->dev, "failed to register LED %d\n", led_dat->id); goto err; } ret = adp8860_led_setup(led_dat); if (ret) { dev_err(&client->dev, "failed to write\n"); i++; goto err; } } data->led = led; return 0; err: for (i = i - 1; i >= 0; --i) { led_classdev_unregister(&led[i].cdev); cancel_work_sync(&led[i].work); } err_free: kfree(led); return ret; } static int __devexit adp8860_led_remove(struct i2c_client *client) { struct adp8860_backlight_platform_data *pdata = client->dev.platform_data; struct adp8860_bl *data = i2c_get_clientdata(client); int i; for (i = 0; i < pdata->num_leds; i++) { led_classdev_unregister(&data->led[i].cdev); cancel_work_sync(&data->led[i].work); } kfree(data->led); return 0; } #else static int __devinit adp8860_led_probe(struct i2c_client *client) { return 0; } static int __devexit adp8860_led_remove(struct i2c_client *client) { return 0; } #endif static int adp8860_bl_set(struct backlight_device *bl, int brightness) { struct adp8860_bl *data = bl_get_data(bl); struct i2c_client *client = data->client; int ret = 0; if (data->en_ambl_sens) { if ((brightness > 0) && (brightness < ADP8860_MAX_BRIGHTNESS)) { /* Disable Ambient Light auto adjust */ ret |= adp8860_clr_bits(client, ADP8860_MDCR, CMP_AUTOEN); ret |= adp8860_write(client, ADP8860_BLMX1, brightness); } else { /* * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust * restore daylight l1 sysfs brightness */ ret |= adp8860_write(client, ADP8860_BLMX1, data->cached_daylight_max); ret |= adp8860_set_bits(client, ADP8860_MDCR, CMP_AUTOEN); } } else ret |= adp8860_write(client, ADP8860_BLMX1, brightness); if (data->current_brightness && brightness == 0) ret |= adp8860_set_bits(client, ADP8860_MDCR, DIM_EN); else if (data->current_brightness == 0 && brightness) ret |= adp8860_clr_bits(client, ADP8860_MDCR, DIM_EN); if (!ret) 
data->current_brightness = brightness; return ret; } static int adp8860_bl_update_status(struct backlight_device *bl) { int brightness = bl->props.brightness; if (bl->props.power != FB_BLANK_UNBLANK) brightness = 0; if (bl->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; return adp8860_bl_set(bl, brightness); } static int adp8860_bl_get_brightness(struct backlight_device *bl) { struct adp8860_bl *data = bl_get_data(bl); return data->current_brightness; } static const struct backlight_ops adp8860_bl_ops = { .update_status = adp8860_bl_update_status, .get_brightness = adp8860_bl_get_brightness, }; static int adp8860_bl_setup(struct backlight_device *bl) { struct adp8860_bl *data = bl_get_data(bl); struct i2c_client *client = data->client; struct adp8860_backlight_platform_data *pdata = data->pdata; int ret = 0; ret |= adp8860_write(client, ADP8860_BLSEN, ~pdata->bl_led_assign); ret |= adp8860_write(client, ADP8860_BLMX1, pdata->l1_daylight_max); ret |= adp8860_write(client, ADP8860_BLDM1, pdata->l1_daylight_dim); if (data->en_ambl_sens) { data->cached_daylight_max = pdata->l1_daylight_max; ret |= adp8860_write(client, ADP8860_BLMX2, pdata->l2_office_max); ret |= adp8860_write(client, ADP8860_BLDM2, pdata->l2_office_dim); ret |= adp8860_write(client, ADP8860_BLMX3, pdata->l3_dark_max); ret |= adp8860_write(client, ADP8860_BLDM3, pdata->l3_dark_dim); ret |= adp8860_write(client, ADP8860_L2_TRP, pdata->l2_trip); ret |= adp8860_write(client, ADP8860_L2_HYS, pdata->l2_hyst); ret |= adp8860_write(client, ADP8860_L3_TRP, pdata->l3_trip); ret |= adp8860_write(client, ADP8860_L3_HYS, pdata->l3_hyst); ret |= adp8860_write(client, ADP8860_CCFG, L2_EN | L3_EN | ALS_CCFG_VAL(pdata->abml_filt)); } ret |= adp8860_write(client, ADP8860_CFGR, BL_CFGR_VAL(pdata->bl_fade_law, 0)); ret |= adp8860_write(client, ADP8860_BLFR, FADE_VAL(pdata->bl_fade_in, pdata->bl_fade_out)); ret |= adp8860_set_bits(client, ADP8860_MDCR, BLEN | DIM_EN | NSTBY | (data->gdwn_dis ? 
GDWN_DIS : 0)); return ret; } static ssize_t adp8860_show(struct device *dev, char *buf, int reg) { struct adp8860_bl *data = dev_get_drvdata(dev); int error; uint8_t reg_val; mutex_lock(&data->lock); error = adp8860_read(data->client, reg, &reg_val); mutex_unlock(&data->lock); if (error < 0) return error; return sprintf(buf, "%u\n", reg_val); } static ssize_t adp8860_store(struct device *dev, const char *buf, size_t count, int reg) { struct adp8860_bl *data = dev_get_drvdata(dev); unsigned long val; int ret; ret = strict_strtoul(buf, 10, &val); if (ret) return ret; mutex_lock(&data->lock); adp8860_write(data->client, reg, val); mutex_unlock(&data->lock); return count; } static ssize_t adp8860_bl_l3_dark_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp8860_show(dev, buf, ADP8860_BLMX3); } static ssize_t adp8860_bl_l3_dark_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp8860_store(dev, buf, count, ADP8860_BLMX3); } static DEVICE_ATTR(l3_dark_max, 0664, adp8860_bl_l3_dark_max_show, adp8860_bl_l3_dark_max_store); static ssize_t adp8860_bl_l2_office_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp8860_show(dev, buf, ADP8860_BLMX2); } static ssize_t adp8860_bl_l2_office_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp8860_store(dev, buf, count, ADP8860_BLMX2); } static DEVICE_ATTR(l2_office_max, 0664, adp8860_bl_l2_office_max_show, adp8860_bl_l2_office_max_store); static ssize_t adp8860_bl_l1_daylight_max_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp8860_show(dev, buf, ADP8860_BLMX1); } static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adp8860_bl *data = dev_get_drvdata(dev); int ret = strict_strtoul(buf, 10, &data->cached_daylight_max); if (ret) return ret; return 
adp8860_store(dev, buf, count, ADP8860_BLMX1); } static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, adp8860_bl_l1_daylight_max_store); static ssize_t adp8860_bl_l3_dark_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp8860_show(dev, buf, ADP8860_BLDM3); } static ssize_t adp8860_bl_l3_dark_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp8860_store(dev, buf, count, ADP8860_BLDM3); } static DEVICE_ATTR(l3_dark_dim, 0664, adp8860_bl_l3_dark_dim_show, adp8860_bl_l3_dark_dim_store); static ssize_t adp8860_bl_l2_office_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp8860_show(dev, buf, ADP8860_BLDM2); } static ssize_t adp8860_bl_l2_office_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp8860_store(dev, buf, count, ADP8860_BLDM2); } static DEVICE_ATTR(l2_office_dim, 0664, adp8860_bl_l2_office_dim_show, adp8860_bl_l2_office_dim_store); static ssize_t adp8860_bl_l1_daylight_dim_show(struct device *dev, struct device_attribute *attr, char *buf) { return adp8860_show(dev, buf, ADP8860_BLDM1); } static ssize_t adp8860_bl_l1_daylight_dim_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return adp8860_store(dev, buf, count, ADP8860_BLDM1); } static DEVICE_ATTR(l1_daylight_dim, 0664, adp8860_bl_l1_daylight_dim_show, adp8860_bl_l1_daylight_dim_store); #ifdef ADP8860_EXT_FEATURES static ssize_t adp8860_bl_ambient_light_level_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adp8860_bl *data = dev_get_drvdata(dev); int error; uint8_t reg_val; uint16_t ret_val; mutex_lock(&data->lock); error = adp8860_read(data->client, ADP8860_PH1LEVL, &reg_val); ret_val = reg_val; error |= adp8860_read(data->client, ADP8860_PH1LEVH, &reg_val); mutex_unlock(&data->lock); if (error < 0) return error; /* Return 13-bit conversion value 
for the first light sensor */ ret_val += (reg_val & 0x1F) << 8; return sprintf(buf, "%u\n", ret_val); } static DEVICE_ATTR(ambient_light_level, 0444, adp8860_bl_ambient_light_level_show, NULL); static ssize_t adp8860_bl_ambient_light_zone_show(struct device *dev, struct device_attribute *attr, char *buf) { struct adp8860_bl *data = dev_get_drvdata(dev); int error; uint8_t reg_val; mutex_lock(&data->lock); error = adp8860_read(data->client, ADP8860_CFGR, &reg_val); mutex_unlock(&data->lock); if (error < 0) return error; return sprintf(buf, "%u\n", ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1); } static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adp8860_bl *data = dev_get_drvdata(dev); unsigned long val; uint8_t reg_val; int ret; ret = strict_strtoul(buf, 10, &val); if (ret) return ret; if (val == 0) { /* Enable automatic ambient light sensing */ adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); } else if ((val > 0) && (val <= 3)) { /* Disable automatic ambient light sensing */ adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); /* Set user supplied ambient light zone */ mutex_lock(&data->lock); adp8860_read(data->client, ADP8860_CFGR, &reg_val); reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); reg_val |= (val - 1) << CFGR_BLV_SHIFT; adp8860_write(data->client, ADP8860_CFGR, reg_val); mutex_unlock(&data->lock); } return count; } static DEVICE_ATTR(ambient_light_zone, 0664, adp8860_bl_ambient_light_zone_show, adp8860_bl_ambient_light_zone_store); #endif static struct attribute *adp8860_bl_attributes[] = { &dev_attr_l3_dark_max.attr, &dev_attr_l3_dark_dim.attr, &dev_attr_l2_office_max.attr, &dev_attr_l2_office_dim.attr, &dev_attr_l1_daylight_max.attr, &dev_attr_l1_daylight_dim.attr, #ifdef ADP8860_EXT_FEATURES &dev_attr_ambient_light_level.attr, &dev_attr_ambient_light_zone.attr, #endif NULL }; static const struct attribute_group adp8860_bl_attr_group = { .attrs 
= adp8860_bl_attributes, }; static int __devinit adp8860_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct backlight_device *bl; struct adp8860_bl *data; struct adp8860_backlight_platform_data *pdata = client->dev.platform_data; struct backlight_properties props; uint8_t reg_val; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); return -EIO; } if (!pdata) { dev_err(&client->dev, "no platform data?\n"); return -EINVAL; } data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return -ENOMEM; ret = adp8860_read(client, ADP8860_MFDVID, &reg_val); if (ret < 0) goto out2; switch (ADP8860_MANID(reg_val)) { case ADP8863_MANUFID: data->gdwn_dis = !!pdata->gdwn_dis; case ADP8860_MANUFID: data->en_ambl_sens = !!pdata->en_ambl_sens; break; case ADP8861_MANUFID: data->gdwn_dis = !!pdata->gdwn_dis; break; default: dev_err(&client->dev, "failed to probe\n"); ret = -ENODEV; goto out2; } /* It's confirmed that the DEVID field is actually a REVID */ data->revid = ADP8860_DEVID(reg_val); data->client = client; data->pdata = pdata; data->id = id->driver_data; data->current_brightness = 0; i2c_set_clientdata(client, data); memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; props.max_brightness = ADP8860_MAX_BRIGHTNESS; mutex_init(&data->lock); bl = backlight_device_register(dev_driver_string(&client->dev), &client->dev, data, &adp8860_bl_ops, &props); if (IS_ERR(bl)) { dev_err(&client->dev, "failed to register backlight\n"); ret = PTR_ERR(bl); goto out2; } bl->props.max_brightness = bl->props.brightness = ADP8860_MAX_BRIGHTNESS; data->bl = bl; if (data->en_ambl_sens) ret = sysfs_create_group(&bl->dev.kobj, &adp8860_bl_attr_group); if (ret) { dev_err(&client->dev, "failed to register sysfs\n"); goto out1; } ret = adp8860_bl_setup(bl); if (ret) { ret = -EIO; goto out; } backlight_update_status(bl); dev_info(&client->dev, "%s Rev.%d Backlight\n", 
client->name, data->revid); if (pdata->num_leds) adp8860_led_probe(client); return 0; out: if (data->en_ambl_sens) sysfs_remove_group(&data->bl->dev.kobj, &adp8860_bl_attr_group); out1: backlight_device_unregister(bl); out2: kfree(data); return ret; } static int __devexit adp8860_remove(struct i2c_client *client) { struct adp8860_bl *data = i2c_get_clientdata(client); adp8860_clr_bits(client, ADP8860_MDCR, NSTBY); if (data->led) adp8860_led_remove(client); if (data->en_ambl_sens) sysfs_remove_group(&data->bl->dev.kobj, &adp8860_bl_attr_group); backlight_device_unregister(data->bl); kfree(data); return 0; } #ifdef CONFIG_PM static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) { adp8860_clr_bits(client, ADP8860_MDCR, NSTBY); return 0; } static int adp8860_i2c_resume(struct i2c_client *client) { adp8860_set_bits(client, ADP8860_MDCR, NSTBY); return 0; } #else #define adp8860_i2c_suspend NULL #define adp8860_i2c_resume NULL #endif static const struct i2c_device_id adp8860_id[] = { { "adp8860", adp8860 }, { "adp8861", adp8861 }, { "adp8863", adp8863 }, { } }; MODULE_DEVICE_TABLE(i2c, adp8860_id); static struct i2c_driver adp8860_driver = { .driver = { .name = KBUILD_MODNAME, }, .probe = adp8860_probe, .remove = __devexit_p(adp8860_remove), .suspend = adp8860_i2c_suspend, .resume = adp8860_i2c_resume, .id_table = adp8860_id, }; static int __init adp8860_init(void) { return i2c_add_driver(&adp8860_driver); } module_init(adp8860_init); static void __exit adp8860_exit(void) { i2c_del_driver(&adp8860_driver); } module_exit(adp8860_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("ADP8860 Backlight driver"); MODULE_ALIAS("i2c:adp8860-backlight");
gpl-2.0
idryomov/btrfs-unstable
drivers/staging/iio/resolver/ad2s1210.c
2108
19687
/* * ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210 * * Copyright (c) 2010-2010 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include "ad2s1210.h" #define DRV_NAME "ad2s1210" #define AD2S1210_DEF_CONTROL 0x7E #define AD2S1210_MSB_IS_HIGH 0x80 #define AD2S1210_MSB_IS_LOW 0x7F #define AD2S1210_PHASE_LOCK_RANGE_44 0x20 #define AD2S1210_ENABLE_HYSTERESIS 0x10 #define AD2S1210_SET_ENRES1 0x08 #define AD2S1210_SET_ENRES0 0x04 #define AD2S1210_SET_RES1 0x02 #define AD2S1210_SET_RES0 0x01 #define AD2S1210_SET_ENRESOLUTION (AD2S1210_SET_ENRES1 | \ AD2S1210_SET_ENRES0) #define AD2S1210_SET_RESOLUTION (AD2S1210_SET_RES1 | AD2S1210_SET_RES0) #define AD2S1210_REG_POSITION 0x80 #define AD2S1210_REG_VELOCITY 0x82 #define AD2S1210_REG_LOS_THRD 0x88 #define AD2S1210_REG_DOS_OVR_THRD 0x89 #define AD2S1210_REG_DOS_MIS_THRD 0x8A #define AD2S1210_REG_DOS_RST_MAX_THRD 0x8B #define AD2S1210_REG_DOS_RST_MIN_THRD 0x8C #define AD2S1210_REG_LOT_HIGH_THRD 0x8D #define AD2S1210_REG_LOT_LOW_THRD 0x8E #define AD2S1210_REG_EXCIT_FREQ 0x91 #define AD2S1210_REG_CONTROL 0x92 #define AD2S1210_REG_SOFT_RESET 0xF0 #define AD2S1210_REG_FAULT 0xFF /* pin SAMPLE, A0, A1, RES0, RES1, is controlled by driver */ #define AD2S1210_SAA 3 #define AD2S1210_PN (AD2S1210_SAA + AD2S1210_RES) #define AD2S1210_MIN_CLKIN 6144000 #define AD2S1210_MAX_CLKIN 10240000 #define AD2S1210_MIN_EXCIT 2000 #define AD2S1210_MAX_EXCIT 20000 #define AD2S1210_MIN_FCW 0x4 #define AD2S1210_MAX_FCW 0x50 /* default input clock on serial interface */ #define 
AD2S1210_DEF_CLKIN 8192000 /* clock period in nano second */ #define AD2S1210_DEF_TCK (1000000000/AD2S1210_DEF_CLKIN) #define AD2S1210_DEF_EXCIT 10000 enum ad2s1210_mode { MOD_POS = 0, MOD_VEL, MOD_CONFIG, MOD_RESERVED, }; static const unsigned int ad2s1210_resolution_value[] = { 10, 12, 14, 16 }; struct ad2s1210_state { const struct ad2s1210_platform_data *pdata; struct mutex lock; struct spi_device *sdev; unsigned int fclkin; unsigned int fexcit; bool hysteresis; bool old_data; u8 resolution; enum ad2s1210_mode mode; u8 rx[2] ____cacheline_aligned; u8 tx[2] ____cacheline_aligned; }; static const int ad2s1210_mode_vals[4][2] = { [MOD_POS] = { 0, 0 }, [MOD_VEL] = { 0, 1 }, [MOD_CONFIG] = { 1, 0 }, }; static inline void ad2s1210_set_mode(enum ad2s1210_mode mode, struct ad2s1210_state *st) { gpio_set_value(st->pdata->a[0], ad2s1210_mode_vals[mode][0]); gpio_set_value(st->pdata->a[1], ad2s1210_mode_vals[mode][1]); st->mode = mode; } /* write 1 bytes (address or data) to the chip */ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) { int ret; ad2s1210_set_mode(MOD_CONFIG, st); st->tx[0] = data; ret = spi_write(st->sdev, st->tx, 1); if (ret < 0) return ret; st->old_data = true; return 0; } /* read value from one of the registers */ static int ad2s1210_config_read(struct ad2s1210_state *st, unsigned char address) { struct spi_transfer xfer = { .len = 2, .rx_buf = st->rx, .tx_buf = st->tx, }; int ret = 0; ad2s1210_set_mode(MOD_CONFIG, st); st->tx[0] = address | AD2S1210_MSB_IS_HIGH; st->tx[1] = AD2S1210_REG_FAULT; ret = spi_sync_transfer(st->sdev, &xfer, 1); if (ret < 0) return ret; st->old_data = true; return st->rx[1]; } static inline int ad2s1210_update_frequency_control_word(struct ad2s1210_state *st) { int ret; unsigned char fcw; fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin); if (fcw < AD2S1210_MIN_FCW || fcw > AD2S1210_MAX_FCW) { pr_err("ad2s1210: FCW out of range\n"); return -ERANGE; } ret = ad2s1210_config_write(st, 
AD2S1210_REG_EXCIT_FREQ); if (ret < 0) return ret; return ad2s1210_config_write(st, fcw); } static unsigned char ad2s1210_read_resolution_pin(struct ad2s1210_state *st) { return ad2s1210_resolution_value[ (gpio_get_value(st->pdata->res[0]) << 1) | gpio_get_value(st->pdata->res[1])]; } static const int ad2s1210_res_pins[4][2] = { { 0, 0 }, {0, 1}, {1, 0}, {1, 1} }; static inline void ad2s1210_set_resolution_pin(struct ad2s1210_state *st) { gpio_set_value(st->pdata->res[0], ad2s1210_res_pins[(st->resolution - 10)/2][0]); gpio_set_value(st->pdata->res[1], ad2s1210_res_pins[(st->resolution - 10)/2][1]); } static inline int ad2s1210_soft_reset(struct ad2s1210_state *st) { int ret; ret = ad2s1210_config_write(st, AD2S1210_REG_SOFT_RESET); if (ret < 0) return ret; return ad2s1210_config_write(st, 0x0); } static ssize_t ad2s1210_store_softreset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); int ret; mutex_lock(&st->lock); ret = ad2s1210_soft_reset(st); mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static ssize_t ad2s1210_show_fclkin(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); return sprintf(buf, "%d\n", st->fclkin); } static ssize_t ad2s1210_store_fclkin(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned long fclkin; int ret; ret = strict_strtoul(buf, 10, &fclkin); if (ret) return ret; if (fclkin < AD2S1210_MIN_CLKIN || fclkin > AD2S1210_MAX_CLKIN) { pr_err("ad2s1210: fclkin out of range\n"); return -EINVAL; } mutex_lock(&st->lock); st->fclkin = fclkin; ret = ad2s1210_update_frequency_control_word(st); if (ret < 0) goto error_ret; ret = ad2s1210_soft_reset(st); error_ret: mutex_unlock(&st->lock); return ret < 0 ? 
ret : len; } static ssize_t ad2s1210_show_fexcit(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); return sprintf(buf, "%d\n", st->fexcit); } static ssize_t ad2s1210_store_fexcit(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned long fexcit; int ret; ret = strict_strtoul(buf, 10, &fexcit); if (ret < 0) return ret; if (fexcit < AD2S1210_MIN_EXCIT || fexcit > AD2S1210_MAX_EXCIT) { pr_err("ad2s1210: excitation frequency out of range\n"); return -EINVAL; } mutex_lock(&st->lock); st->fexcit = fexcit; ret = ad2s1210_update_frequency_control_word(st); if (ret < 0) goto error_ret; ret = ad2s1210_soft_reset(st); error_ret: mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static ssize_t ad2s1210_show_control(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); int ret; mutex_lock(&st->lock); ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); mutex_unlock(&st->lock); return ret < 0 ? 
ret : sprintf(buf, "0x%x\n", ret); } static ssize_t ad2s1210_store_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned long udata; unsigned char data; int ret; ret = strict_strtoul(buf, 16, &udata); if (ret) return -EINVAL; mutex_lock(&st->lock); ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = udata & AD2S1210_MSB_IS_LOW; ret = ad2s1210_config_write(st, data); if (ret < 0) goto error_ret; ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; if (ret & AD2S1210_MSB_IS_HIGH) { ret = -EIO; pr_err("ad2s1210: write control register fail\n"); goto error_ret; } st->resolution = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION]; if (st->pdata->gpioin) { data = ad2s1210_read_resolution_pin(st); if (data != st->resolution) pr_warning("ad2s1210: resolution settings not match\n"); } else ad2s1210_set_resolution_pin(st); ret = len; st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS); error_ret: mutex_unlock(&st->lock); return ret; } static ssize_t ad2s1210_show_resolution(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); return sprintf(buf, "%d\n", st->resolution); } static ssize_t ad2s1210_store_resolution(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned char data; unsigned long udata; int ret; ret = strict_strtoul(buf, 10, &udata); if (ret || udata < 10 || udata > 16) { pr_err("ad2s1210: resolution out of range\n"); return -EINVAL; } mutex_lock(&st->lock); ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = ret; data &= ~AD2S1210_SET_RESOLUTION; data |= (udata - 10) >> 1; ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; ret = 
ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW); if (ret < 0) goto error_ret; ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = ret; if (data & AD2S1210_MSB_IS_HIGH) { ret = -EIO; pr_err("ad2s1210: setting resolution fail\n"); goto error_ret; } st->resolution = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION]; if (st->pdata->gpioin) { data = ad2s1210_read_resolution_pin(st); if (data != st->resolution) pr_warning("ad2s1210: resolution settings not match\n"); } else ad2s1210_set_resolution_pin(st); ret = len; error_ret: mutex_unlock(&st->lock); return ret; } /* read the fault register since last sample */ static ssize_t ad2s1210_show_fault(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); int ret; mutex_lock(&st->lock); ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT); mutex_unlock(&st->lock); return ret ? ret : sprintf(buf, "0x%x\n", ret); } static ssize_t ad2s1210_clear_fault(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); int ret; mutex_lock(&st->lock); gpio_set_value(st->pdata->sample, 0); /* delay (2 * tck + 20) nano seconds */ udelay(1); gpio_set_value(st->pdata->sample, 1); ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT); if (ret < 0) goto error_ret; gpio_set_value(st->pdata->sample, 0); gpio_set_value(st->pdata->sample, 1); error_ret: mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static ssize_t ad2s1210_show_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); struct iio_dev_attr *iattr = to_iio_dev_attr(attr); int ret; mutex_lock(&st->lock); ret = ad2s1210_config_read(st, iattr->address); mutex_unlock(&st->lock); return ret < 0 ? 
ret : sprintf(buf, "%d\n", ret); } static ssize_t ad2s1210_store_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned long data; int ret; struct iio_dev_attr *iattr = to_iio_dev_attr(attr); ret = strict_strtoul(buf, 10, &data); if (ret) return -EINVAL; mutex_lock(&st->lock); ret = ad2s1210_config_write(st, iattr->address); if (ret < 0) goto error_ret; ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW); error_ret: mutex_unlock(&st->lock); return ret < 0 ? ret : len; } static int ad2s1210_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { struct ad2s1210_state *st = iio_priv(indio_dev); bool negative; int ret = 0; u16 pos; s16 vel; mutex_lock(&st->lock); gpio_set_value(st->pdata->sample, 0); /* delay (6 * tck + 20) nano seconds */ udelay(1); switch (chan->type) { case IIO_ANGL: ad2s1210_set_mode(MOD_POS, st); break; case IIO_ANGL_VEL: ad2s1210_set_mode(MOD_VEL, st); break; default: ret = -EINVAL; break; } if (ret < 0) goto error_ret; ret = spi_read(st->sdev, st->rx, 2); if (ret < 0) goto error_ret; switch (chan->type) { case IIO_ANGL: pos = be16_to_cpup((u16 *)st->rx); if (st->hysteresis) pos >>= 16 - st->resolution; *val = pos; ret = IIO_VAL_INT; break; case IIO_ANGL_VEL: negative = st->rx[0] & 0x80; vel = be16_to_cpup((s16 *)st->rx); vel >>= 16 - st->resolution; if (vel & 0x8000) { negative = (0xffff >> st->resolution) << st->resolution; vel |= negative; } *val = vel; ret = IIO_VAL_INT; break; default: mutex_unlock(&st->lock); return -EINVAL; } error_ret: gpio_set_value(st->pdata->sample, 1); /* delay (2 * tck + 20) nano seconds */ udelay(1); mutex_unlock(&st->lock); return ret; } static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, ad2s1210_store_softreset, 0); static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR, ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0); static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR, 
ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0); static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR, ad2s1210_show_control, ad2s1210_store_control, 0); static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR, ad2s1210_show_resolution, ad2s1210_store_resolution, 0); static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, ad2s1210_show_fault, ad2s1210_clear_fault, 0); static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_LOS_THRD); static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_OVR_THRD); static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_MIS_THRD); static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_RST_MAX_THRD); static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_DOS_RST_MIN_THRD); static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_LOT_HIGH_THRD); static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_LOT_LOW_THRD); static const struct iio_chan_spec ad2s1210_channels[] = { { .type = IIO_ANGL, .indexed = 1, .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, { .type = IIO_ANGL_VEL, .indexed = 1, .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), } }; static struct attribute *ad2s1210_attributes[] = { &iio_dev_attr_reset.dev_attr.attr, &iio_dev_attr_fclkin.dev_attr.attr, &iio_dev_attr_fexcit.dev_attr.attr, &iio_dev_attr_control.dev_attr.attr, &iio_dev_attr_bits.dev_attr.attr, &iio_dev_attr_fault.dev_attr.attr, &iio_dev_attr_los_thrd.dev_attr.attr, &iio_dev_attr_dos_ovr_thrd.dev_attr.attr, &iio_dev_attr_dos_mis_thrd.dev_attr.attr, &iio_dev_attr_dos_rst_max_thrd.dev_attr.attr, &iio_dev_attr_dos_rst_min_thrd.dev_attr.attr, 
&iio_dev_attr_lot_high_thrd.dev_attr.attr, &iio_dev_attr_lot_low_thrd.dev_attr.attr, NULL, }; static const struct attribute_group ad2s1210_attribute_group = { .attrs = ad2s1210_attributes, }; static int ad2s1210_initial(struct ad2s1210_state *st) { unsigned char data; int ret; mutex_lock(&st->lock); if (st->pdata->gpioin) st->resolution = ad2s1210_read_resolution_pin(st); else ad2s1210_set_resolution_pin(st); ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; data = AD2S1210_DEF_CONTROL & ~(AD2S1210_SET_RESOLUTION); data |= (st->resolution - 10) >> 1; ret = ad2s1210_config_write(st, data); if (ret < 0) goto error_ret; ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL); if (ret < 0) goto error_ret; if (ret & AD2S1210_MSB_IS_HIGH) { ret = -EIO; goto error_ret; } ret = ad2s1210_update_frequency_control_word(st); if (ret < 0) goto error_ret; ret = ad2s1210_soft_reset(st); error_ret: mutex_unlock(&st->lock); return ret; } static const struct iio_info ad2s1210_info = { .read_raw = &ad2s1210_read_raw, .attrs = &ad2s1210_attribute_group, .driver_module = THIS_MODULE, }; static int ad2s1210_setup_gpios(struct ad2s1210_state *st) { unsigned long flags = st->pdata->gpioin ? GPIOF_DIR_IN : GPIOF_DIR_OUT; struct gpio ad2s1210_gpios[] = { { st->pdata->sample, GPIOF_DIR_IN, "sample" }, { st->pdata->a[0], flags, "a0" }, { st->pdata->a[1], flags, "a1" }, { st->pdata->res[0], flags, "res0" }, { st->pdata->res[0], flags, "res1" }, }; return gpio_request_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios)); } static void ad2s1210_free_gpios(struct ad2s1210_state *st) { unsigned long flags = st->pdata->gpioin ? 
GPIOF_DIR_IN : GPIOF_DIR_OUT; struct gpio ad2s1210_gpios[] = { { st->pdata->sample, GPIOF_DIR_IN, "sample" }, { st->pdata->a[0], flags, "a0" }, { st->pdata->a[1], flags, "a1" }, { st->pdata->res[0], flags, "res0" }, { st->pdata->res[0], flags, "res1" }, }; gpio_free_array(ad2s1210_gpios, ARRAY_SIZE(ad2s1210_gpios)); } static int ad2s1210_probe(struct spi_device *spi) { struct iio_dev *indio_dev; struct ad2s1210_state *st; int ret; if (spi->dev.platform_data == NULL) return -EINVAL; indio_dev = iio_device_alloc(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); st->pdata = spi->dev.platform_data; ret = ad2s1210_setup_gpios(st); if (ret < 0) goto error_free_dev; spi_set_drvdata(spi, indio_dev); mutex_init(&st->lock); st->sdev = spi; st->hysteresis = true; st->mode = MOD_CONFIG; st->resolution = 12; st->fexcit = AD2S1210_DEF_EXCIT; indio_dev->dev.parent = &spi->dev; indio_dev->info = &ad2s1210_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = ad2s1210_channels; indio_dev->num_channels = ARRAY_SIZE(ad2s1210_channels); indio_dev->name = spi_get_device_id(spi)->name; ret = iio_device_register(indio_dev); if (ret) goto error_free_gpios; st->fclkin = spi->max_speed_hz; spi->mode = SPI_MODE_3; spi_setup(spi); ad2s1210_initial(st); return 0; error_free_gpios: ad2s1210_free_gpios(st); error_free_dev: iio_device_free(indio_dev); error_ret: return ret; } static int ad2s1210_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); iio_device_unregister(indio_dev); ad2s1210_free_gpios(iio_priv(indio_dev)); iio_device_free(indio_dev); return 0; } static const struct spi_device_id ad2s1210_id[] = { { "ad2s1210" }, {} }; MODULE_DEVICE_TABLE(spi, ad2s1210_id); static struct spi_driver ad2s1210_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ad2s1210_probe, .remove = ad2s1210_remove, .id_table = ad2s1210_id, }; module_spi_driver(ad2s1210_driver); MODULE_AUTHOR("Graff 
Yang <graff.yang@gmail.com>"); MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
TheWhisp/android_kernel_samsung_kyle
drivers/net/can/mscan/mpc5xxx_can.c
2108
11635
/* * CAN bus driver for the Freescale MPC5xxx embedded CPU. * * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>, * Varma Electronics Oy * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com> * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/can/dev.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/mpc52xx.h> #include "mscan.h" #define DRV_NAME "mpc5xxx_can" struct mpc5xxx_can_data { unsigned int type; u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc); }; #ifdef CONFIG_PPC_MPC52xx static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = { { .compatible = "fsl,mpc5200-cdm", }, {} }; static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc) { unsigned int pvr; struct mpc52xx_cdm __iomem *cdm; struct device_node *np_cdm; unsigned int freq; u32 val; pvr = mfspr(SPRN_PVR); /* * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock * (IP_CLK) can be selected as MSCAN clock source. 
According to * the MPC5200 user's manual, the oscillator clock is the better * choice as it has less jitter. For this reason, it is selected * by default. Unfortunately, it can not be selected for the old * MPC5200 Rev. A chips due to a hardware bug (check errata). */ if (clock_name && strcmp(clock_name, "ip") == 0) *mscan_clksrc = MSCAN_CLKSRC_BUS; else *mscan_clksrc = MSCAN_CLKSRC_XTAL; freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); if (!freq) return 0; if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011) return freq; /* Determine SYS_XTAL_IN frequency from the clock domain settings */ np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids); if (!np_cdm) { dev_err(&ofdev->dev, "can't get clock node!\n"); return 0; } cdm = of_iomap(np_cdm, 0); if (in_8(&cdm->ipb_clk_sel) & 0x1) freq *= 2; val = in_be32(&cdm->rstcfg); freq *= (val & (1 << 5)) ? 8 : 4; freq /= (val & (1 << 6)) ? 12 : 16; of_node_put(np_cdm); iounmap(cdm); return freq; } #else /* !CONFIG_PPC_MPC52xx */ static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc) { return 0; } #endif /* CONFIG_PPC_MPC52xx */ #ifdef CONFIG_PPC_MPC512x struct mpc512x_clockctl { u32 spmr; /* System PLL Mode Reg */ u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */ u32 scfr1; /* System Clk Freq Reg 1 */ u32 scfr2; /* System Clk Freq Reg 2 */ u32 reserved; u32 bcr; /* Bread Crumb Reg */ u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */ u32 spccr; /* SPDIF Clk Ctrl Reg */ u32 cccr; /* CFM Clk Ctrl Reg */ u32 dccr; /* DIU Clk Cnfg Reg */ u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */ }; static struct of_device_id __devinitdata mpc512x_clock_ids[] = { { .compatible = "fsl,mpc5121-clock", }, {} }; static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc) { struct mpc512x_clockctl __iomem *clockctl; struct device_node *np_clock; struct clk *sys_clk, *ref_clk; int plen, clockidx, clocksrc = -1; u32 sys_freq, val, clockdiv 
= 1, freq = 0; const u32 *pval; np_clock = of_find_matching_node(NULL, mpc512x_clock_ids); if (!np_clock) { dev_err(&ofdev->dev, "couldn't find clock node\n"); return 0; } clockctl = of_iomap(np_clock, 0); if (!clockctl) { dev_err(&ofdev->dev, "couldn't map clock registers\n"); goto exit_put; } /* Determine the MSCAN device index from the physical address */ pval = of_get_property(ofdev->dev.of_node, "reg", &plen); BUG_ON(!pval || plen < sizeof(*pval)); clockidx = (*pval & 0x80) ? 1 : 0; if (*pval & 0x2000) clockidx += 2; /* * Clock source and divider selection: 3 different clock sources * can be selected: "ip", "ref" or "sys". For the latter two, a * clock divider can be defined as well. If the clock source is * not specified by the device tree, we first try to find an * optimal CAN source clock based on the system clock. If that * is not posslible, the reference clock will be used. */ if (clock_name && !strcmp(clock_name, "ip")) { *mscan_clksrc = MSCAN_CLKSRC_IPS; freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); } else { *mscan_clksrc = MSCAN_CLKSRC_BUS; pval = of_get_property(ofdev->dev.of_node, "fsl,mscan-clock-divider", &plen); if (pval && plen == sizeof(*pval)) clockdiv = *pval; if (!clockdiv) clockdiv = 1; if (!clock_name || !strcmp(clock_name, "sys")) { sys_clk = clk_get(&ofdev->dev, "sys_clk"); if (!sys_clk) { dev_err(&ofdev->dev, "couldn't get sys_clk\n"); goto exit_unmap; } /* Get and round up/down sys clock rate */ sys_freq = 1000000 * ((clk_get_rate(sys_clk) + 499999) / 1000000); if (!clock_name) { /* A multiple of 16 MHz would be optimal */ if ((sys_freq % 16000000) == 0) { clocksrc = 0; clockdiv = sys_freq / 16000000; freq = sys_freq / clockdiv; } } else { clocksrc = 0; freq = sys_freq / clockdiv; } } if (clocksrc < 0) { ref_clk = clk_get(&ofdev->dev, "ref_clk"); if (!ref_clk) { dev_err(&ofdev->dev, "couldn't get ref_clk\n"); goto exit_unmap; } clocksrc = 1; freq = clk_get_rate(ref_clk) / clockdiv; } } /* Disable clock */ 
out_be32(&clockctl->mccr[clockidx], 0x0); if (clocksrc >= 0) { /* Set source and divider */ val = (clocksrc << 14) | ((clockdiv - 1) << 17); out_be32(&clockctl->mccr[clockidx], val); /* Enable clock */ out_be32(&clockctl->mccr[clockidx], val | 0x10000); } /* Enable MSCAN clock domain */ val = in_be32(&clockctl->sccr[1]); if (!(val & (1 << 25))) out_be32(&clockctl->sccr[1], val | (1 << 25)); dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n", *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" : clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv); exit_unmap: iounmap(clockctl); exit_put: of_node_put(np_clock); return freq; } #else /* !CONFIG_PPC_MPC512x */ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc) { return 0; } #endif /* CONFIG_PPC_MPC512x */ static struct of_device_id mpc5xxx_can_table[]; static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) { const struct of_device_id *match; struct mpc5xxx_can_data *data; struct device_node *np = ofdev->dev.of_node; struct net_device *dev; struct mscan_priv *priv; void __iomem *base; const char *clock_name = NULL; int irq, mscan_clksrc = 0; int err = -ENOMEM; match = of_match_device(mpc5xxx_can_table, &ofdev->dev); if (!match) return -EINVAL; data = match->data; base = of_iomap(np, 0); if (!base) { dev_err(&ofdev->dev, "couldn't ioremap\n"); return err; } irq = irq_of_parse_and_map(np, 0); if (!irq) { dev_err(&ofdev->dev, "no irq found\n"); err = -ENODEV; goto exit_unmap_mem; } dev = alloc_mscandev(); if (!dev) goto exit_dispose_irq; priv = netdev_priv(dev); priv->reg_base = base; dev->irq = irq; clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL); BUG_ON(!data); priv->type = data->type; priv->can.clock.freq = data->get_clock(ofdev, clock_name, &mscan_clksrc); if (!priv->can.clock.freq) { dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n"); goto exit_free_mscan; } SET_NETDEV_DEV(dev, &ofdev->dev); err = 
register_mscandev(dev, mscan_clksrc); if (err) { dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_free_mscan; } dev_set_drvdata(&ofdev->dev, dev); dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", priv->reg_base, dev->irq, priv->can.clock.freq); return 0; exit_free_mscan: free_candev(dev); exit_dispose_irq: irq_dispose_mapping(irq); exit_unmap_mem: iounmap(base); return err; } static int __devexit mpc5xxx_can_remove(struct platform_device *ofdev) { struct net_device *dev = dev_get_drvdata(&ofdev->dev); struct mscan_priv *priv = netdev_priv(dev); dev_set_drvdata(&ofdev->dev, NULL); unregister_mscandev(dev); iounmap(priv->reg_base); irq_dispose_mapping(dev->irq); free_candev(dev); return 0; } #ifdef CONFIG_PM static struct mscan_regs saved_regs; static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state) { struct net_device *dev = dev_get_drvdata(&ofdev->dev); struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; _memcpy_fromio(&saved_regs, regs, sizeof(*regs)); return 0; } static int mpc5xxx_can_resume(struct platform_device *ofdev) { struct net_device *dev = dev_get_drvdata(&ofdev->dev); struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; regs->canctl0 |= MSCAN_INITRQ; while (!(regs->canctl1 & MSCAN_INITAK)) udelay(10); regs->canctl1 = saved_regs.canctl1; regs->canbtr0 = saved_regs.canbtr0; regs->canbtr1 = saved_regs.canbtr1; regs->canidac = saved_regs.canidac; /* restore masks, buffers etc. 
*/ _memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0, sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0)); regs->canctl0 &= ~MSCAN_INITRQ; regs->cantbsel = saved_regs.cantbsel; regs->canrier = saved_regs.canrier; regs->cantier = saved_regs.cantier; regs->canctl0 = saved_regs.canctl0; return 0; } #endif static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = { .type = MSCAN_TYPE_MPC5200, .get_clock = mpc52xx_can_get_clock, }; static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = { .type = MSCAN_TYPE_MPC5121, .get_clock = mpc512x_can_get_clock, }; static struct of_device_id __devinitdata mpc5xxx_can_table[] = { { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, }, /* Note that only MPC5121 Rev. 2 (and later) is supported */ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, }, {}, }; static struct platform_driver mpc5xxx_can_driver = { .driver = { .name = "mpc5xxx_can", .owner = THIS_MODULE, .of_match_table = mpc5xxx_can_table, }, .probe = mpc5xxx_can_probe, .remove = __devexit_p(mpc5xxx_can_remove), #ifdef CONFIG_PM .suspend = mpc5xxx_can_suspend, .resume = mpc5xxx_can_resume, #endif }; static int __init mpc5xxx_can_init(void) { return platform_driver_register(&mpc5xxx_can_driver); } module_init(mpc5xxx_can_init); static void __exit mpc5xxx_can_exit(void) { platform_driver_unregister(&mpc5xxx_can_driver); }; module_exit(mpc5xxx_can_exit); MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
StarKissed/starkissed-kernel-mecha
drivers/net/wireless/iwlegacy/iwl4965-base.c
2364
104245
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/wireless.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <net/mac80211.h> #include <asm/div64.h> #define DRV_NAME "iwl4965" #include "iwl-eeprom.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" #include "iwl-helpers.h" #include "iwl-sta.h" #include "iwl-4965-calib.h" #include "iwl-4965.h" #include "iwl-4965-led.h" /****************************************************************************** * * module boiler plate * ******************************************************************************/ /* * module name, copyright, version, etc. 
*/ #define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux" #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG #define VD "d" #else #define VD #endif #define DRV_VERSION IWLWIFI_VERSION VD MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_LICENSE("GPL"); MODULE_ALIAS("iwl4965"); void iwl4965_update_chain_flags(struct iwl_priv *priv) { struct iwl_rxon_context *ctx; if (priv->cfg->ops->hcmd->set_rxon_chain) { for_each_context(priv, ctx) { priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); if (ctx->active.rx_chain != ctx->staging.rx_chain) iwl_legacy_commit_rxon(priv, ctx); } } } static void iwl4965_clear_free_frames(struct iwl_priv *priv) { struct list_head *element; IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", priv->frames_count); while (!list_empty(&priv->free_frames)) { element = priv->free_frames.next; list_del(element); kfree(list_entry(element, struct iwl_frame, list)); priv->frames_count--; } if (priv->frames_count) { IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", priv->frames_count); priv->frames_count = 0; } } static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv) { struct iwl_frame *frame; struct list_head *element; if (list_empty(&priv->free_frames)) { frame = kzalloc(sizeof(*frame), GFP_KERNEL); if (!frame) { IWL_ERR(priv, "Could not allocate frame!\n"); return NULL; } priv->frames_count++; return frame; } element = priv->free_frames.next; list_del(element); return list_entry(element, struct iwl_frame, list); } static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) { memset(frame, 0, sizeof(*frame)); list_add(&frame->list, &priv->free_frames); } static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv, struct ieee80211_hdr *hdr, int left) { lockdep_assert_held(&priv->mutex); if (!priv->beacon_skb) return 0; if (priv->beacon_skb->len > left) return 0; memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); return priv->beacon_skb->len; } /* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ static void iwl4965_set_beacon_tim(struct iwl_priv *priv, struct iwl_tx_beacon_cmd *tx_beacon_cmd, u8 *beacon, u32 frame_size) { u16 tim_idx; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; /* * The index is relative to frame start but we start looking at the * variable-length part of the beacon. 
*/
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
			(beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}

/*
 * iwl4965_hw_get_beacon_cmd - build a complete beacon host command in @frame
 *
 * Fills in the TX command, the TX beacon command, and the beacon contents.
 * Returns the total size in bytes of the command (header plus beacon frame),
 * or 0 on failure (no beacon context, empty beacon, or oversized beacon).
 * Caller must hold priv->mutex (asserted below).
 */
static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			frame_size);

	/* Set up packet rate and flags */
	rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
				rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}

/*
 * iwl4965_send_beacon_cmd - build and send a REPLY_TX_BEACON host command
 *
 * Returns 0 on success, -ENOMEM when no free frame buffer is available,
 * -EINVAL when the beacon command could not be configured; otherwise the
 * return value of iwl_legacy_send_cmd_pdu().  The frame buffer is always
 * released before returning.
 */
int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	unsigned int frame_size;
	int rc;

	frame = iwl4965_get_free_frame(priv);
	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
	if (!frame_size) {
		IWL_ERR(priv, "Error configuring the beacon command\n");
		iwl4965_free_frame(priv, frame);
		return -EINVAL;
	}

	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl4965_free_frame(priv, frame);

	return rc;
}

/* Read the DMA address of TX buffer @idx back out of a TFD */
static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	/* on >32-bit platforms, bits 32-35 of the address live in hi_n_len */
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

/* Read the byte length of TX buffer @idx back out of a TFD */
static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*
 * Store DMA address and length of TX buffer @idx in a TFD and bump the
 * TFD's buffer count to idx + 1.
 */
static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	/* high 4 address bits share the hi_n_len field with the length */
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

/* Number of TX buffers currently referenced by a TFD (low 5 bits) */
static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/**
 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
				iwl4965_tfd_tb_get_len(tfd, i),
				PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}

/*
 * iwl4965_hw_txq_attach_buf_to_tfd - append one DMA chunk to the current TFD
 *
 * Adds the (@addr, @len) buffer as the next TB of the TFD at the queue's
 * write pointer; with @reset set, the TFD is zeroed first.  Returns 0 on
 * success, -EINVAL when the TFD already holds IWL_NUM_OF_TBS chunks.
 */
int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* hardware supports 36-bit DMA addresses at most */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
*/ int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) { int txq_id = txq->q.id; /* Circular buffer (TFD queue in DRAM) physical base address */ iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8); return 0; } /****************************************************************************** * * Generic RX handler implementations * ******************************************************************************/ static void iwl4965_rx_reply_alive(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_alive_resp *palive; struct delayed_work *pwork; palive = &pkt->u.alive_frame; IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", palive->is_valid, palive->ver_type, palive->ver_subtype); if (palive->ver_subtype == INITIALIZE_SUBTYPE) { IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); memcpy(&priv->card_alive_init, &pkt->u.alive_frame, sizeof(struct iwl_init_alive_resp)); pwork = &priv->init_alive_start; } else { IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); memcpy(&priv->card_alive, &pkt->u.alive_frame, sizeof(struct iwl_alive_resp)); pwork = &priv->alive_start; } /* We delay the ALIVE response by 5ms to * give the HW RF Kill time to activate... */ if (palive->is_valid == UCODE_VALID_OK) queue_delayed_work(priv->workqueue, pwork, msecs_to_jiffies(5)); else IWL_WARN(priv, "uCode did not respond OK.\n"); } /** * iwl4965_bg_statistics_periodic - Timer callback to queue statistics * * This callback is provided in order to send a statistics request. * * This timer function is continually reset to execute within * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION * was received. We need to ensure we receive the statistics in order * to update the temperature used for calibrating the TXPOWER. 
*/
static void iwl4965_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* dont send host command if rf-kill is on */
	if (!iwl_legacy_is_ready_rf(priv))
		return;

	iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
}

/*
 * iwl4965_print_cont_event_trace - emit @num_events continuous-log entries
 *
 * Reads event-log entries from device SRAM (starting at entry @start_idx
 * relative to @base) and reports them through trace points.
 * @mode: 0 - entries carry no timestamp (2 words), 1 - entries carry a
 *        timestamp (3 words).
 */
static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
					u32 start_idx, u32 num_events, u32 mode)
{
	u32 i;
	u32 ptr;		/* SRAM byte address of log data */
	u32 ev, time, data;	/* event log data */
	unsigned long reg_flags;

	/* skip the 4-word log header, then index by per-entry word count */
	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
							0, time, ev);
		} else {
			data = _iwl_legacy_read_direct32(priv,
						HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
						time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}

/*
 * iwl4965_continuous_event_trace - print uCode events added since last call
 *
 * Reads the event-log header from SRAM, then prints all entries written
 * since the previous invocation, handling the cases where the circular log
 * wrapped (once or more) in between.  Bookkeeping counters and the saved
 * (num_wraps, next_entry) position are updated on the way out.
 */
static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity;   /* event log capacity in # entries */
	u32 base;       /* SRAM byte address of event log header */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		capacity = iwl_legacy_read_targ_mem(priv, base);
		num_wraps = iwl_legacy_read_targ_mem(priv,
						base + (2 * sizeof(u32)));
		mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
		next_entry = iwl_legacy_read_targ_mem(priv,
						base + (3 * sizeof(u32)));
	} else
		return;

	if (num_wraps == priv->event_log.num_wraps) {
		/* no wrap: print from last position up to uCode's position */
		iwl4965_print_cont_event_trace(priv,
				base, priv->event_log.next_entry,
				next_entry - priv->event_log.next_entry,
				mode);
		priv->event_log.non_wraps_count++;
	} else {
		if ((num_wraps - priv->event_log.num_wraps) > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;
		trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		/* print the tail of the log, then the wrapped-around head */
		if (next_entry < priv->event_log.next_entry) {
			iwl4965_print_cont_event_trace(priv, base,
			       priv->event_log.next_entry,
			       capacity - priv->event_log.next_entry,
			       mode);

			iwl4965_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		} else {
			iwl4965_print_cont_event_trace(priv, base,
			       next_entry, capacity - next_entry,
			       mode);

			iwl4965_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		}
	}
	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}

/**
 * iwl4965_bg_ucode_trace - Timer callback to log ucode event
 *
 * The timer is continually set to execute every
 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
 * this function is to perform continuous uCode event logging operation
 * if enabled
 */
static void iwl4965_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->event_log.ucode_trace) {
		iwl4965_continuous_event_trace(priv);
		/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
		mod_timer(&priv->ucode_trace,
			jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
	}
}

/* Record the IBSS manager status carried by a BEACON_NOTIFICATION */
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}

/*
 * Critical-temperature kill: stop mac80211 queues and ask the uCode
 * (via CSR_UCODE_DRV_GP1) to exit the CT-kill state.
 */
static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
{
	unsigned long flags;

	IWL_DEBUG_POWER(priv, "Stop all queues\n");

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* read back to flush the write to the device */
	iwl_read32(priv, CSR_UCODE_DRV_GP1);

	spin_lock_irqsave(&priv->reg_lock, flags);
	if (!iwl_grab_nic_access(priv))
		iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}

/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = priv->status;	/* snapshot to detect change */

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* block host commands while the card is disabled */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		iwl4965_perform_ct_kill_task(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_legacy_scan_cancel(priv);

	/* notify rfkill only when the HW kill state actually changed;
	 * otherwise wake anyone waiting on the command queue */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}

/**
 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
*/ static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) { priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_legacy_rx_spectrum_measure_notif; priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_legacy_rx_pm_debug_statistics_notif; priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; /* * The same handler is used for both the REPLY to a discrete * statistics request from the host as well as for the periodic * statistics notifications (after received beacons) from the uCode. */ priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics; priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics; iwl_legacy_setup_rx_scan_handlers(priv); /* status change handler */ priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif; priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = iwl4965_rx_missed_beacon_notif; /* Rx handlers */ priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; /* block ack */ priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; /* Set up hardware specific Rx handlers */ priv->cfg->ops->lib->rx_handler_setup(priv); } /** * iwl4965_rx_handle - Main entry function for receiving responses from uCode * * Uses the priv->rx_handlers callback function array to invoke * the appropriate handlers, including command responses, * frame-received notifications, and other notifications. 
*/
void iwl4965_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_legacy_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_legacy_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl4965_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl4965_rx_replenish_now(priv);
	else
		iwl4965_rx_queue_restock(priv);
}

/* call this function to flush any scheduled tasklet */
static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}

/*
 * iwl4965_irq_tasklet - service interrupt causes latched by the ISR
 *
 * Acks and dispatches the causes recorded in CSR_INT and CSR_FH_INT_STATUS:
 * HW/uCode errors, RF-kill toggles, CT-kill, wakeup, Rx, and uCode-load
 * DMA completion, then re-enables interrupts unless an error path took over.
 */
static void iwl4965_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* bail out: error recovery will restart the device */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl4965_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_legacy_enable_rfkill_int(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv,
			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}

/*****************************************************************************
 *
 * sysfs attributes
 *
 *****************************************************************************/

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG

/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
 * used for controlling the debug level.
 *
 * See the level definitions in iwl for details.
 *
 * The debug_level being managed using sysfs below is a per device debug
 * level that is used instead of the global debug level if it (the per
 * device debug level) is set.
 */
static ssize_t iwl4965_show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
}
static ssize_t iwl4965_store_debug_level(struct device *d,
				struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
	else {
		priv->debug_level = val;
		if (iwl_legacy_alloc_traffic_mem(priv))
			IWL_ERR(priv,
				"Not enough memory to generate traffic log\n");
	}
	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
			iwl4965_show_debug_level, iwl4965_store_debug_level);


#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */

/* Read-only sysfs attribute: last temperature value recorded by the driver */
static ssize_t iwl4965_show_temperature(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	if (!iwl_legacy_is_alive(priv))
		return -EAGAIN;

	return sprintf(buf, "%d\n", priv->temperature);
}

static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);

/* Show the user tx-power limit, or "off" while the RF is not ready */
static ssize_t iwl4965_show_tx_power(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	if (!iwl_legacy_is_ready_rf(priv))
		return sprintf(buf, "off\n");
	else
		return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
}

/* Parse a decimal tx-power value and apply it via iwl_legacy_set_tx_power() */
static ssize_t iwl4965_store_tx_power(struct device *d,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
	else {
		ret = iwl_legacy_set_tx_power(priv, val, false);
		if (ret)
			IWL_ERR(priv, "failed setting tx power (0x%d).\n",
				ret);
		else
			ret = count;
	}
	return ret;
}

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
			iwl4965_show_tx_power, iwl4965_store_tx_power);

static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static struct attribute_group iwl_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl_sysfs_entries,
};

/******************************************************************************
 *
 * uCode download functions
 *
 ******************************************************************************/

/* Release every firmware image descriptor held against the PCI device */
static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}

static void iwl4965_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
					void *context);
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
						u32 max_probe_length);

static int __must_check
iwl4965_request_firmware(struct iwl_priv *priv, bool first) { const char *name_pre = priv->cfg->fw_name_pre; char tag[8]; if (first) { priv->fw_index = priv->cfg->ucode_api_max; sprintf(tag, "%d", priv->fw_index); } else { priv->fw_index--; sprintf(tag, "%d", priv->fw_index); } if (priv->fw_index < priv->cfg->ucode_api_min) { IWL_ERR(priv, "no suitable firmware found!\n"); return -ENOENT; } sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", priv->firmware_name); return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, &priv->pci_dev->dev, GFP_KERNEL, priv, iwl4965_ucode_callback); } struct iwl4965_firmware_pieces { const void *inst, *data, *init, *init_data, *boot; size_t inst_size, data_size, init_size, init_data_size, boot_size; }; static int iwl4965_load_firmware(struct iwl_priv *priv, const struct firmware *ucode_raw, struct iwl4965_firmware_pieces *pieces) { struct iwl_ucode_header *ucode = (void *)ucode_raw->data; u32 api_ver, hdr_size; const u8 *src; priv->ucode_ver = le32_to_cpu(ucode->ver); api_ver = IWL_UCODE_API(priv->ucode_ver); switch (api_ver) { default: case 0: case 1: case 2: hdr_size = 24; if (ucode_raw->size < hdr_size) { IWL_ERR(priv, "File size too small!\n"); return -EINVAL; } pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); pieces->data_size = le32_to_cpu(ucode->v1.data_size); pieces->init_size = le32_to_cpu(ucode->v1.init_size); pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size); pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); src = ucode->v1.data; break; } /* Verify size of file vs. 
image size info in file's header */ if (ucode_raw->size != hdr_size + pieces->inst_size + pieces->data_size + pieces->init_size + pieces->init_data_size + pieces->boot_size) { IWL_ERR(priv, "uCode file size %d does not match expected size\n", (int)ucode_raw->size); return -EINVAL; } pieces->inst = src; src += pieces->inst_size; pieces->data = src; src += pieces->data_size; pieces->init = src; src += pieces->init_size; pieces->init_data = src; src += pieces->init_data_size; pieces->boot = src; src += pieces->boot_size; return 0; } /** * iwl4965_ucode_callback - callback when firmware was loaded * * If loaded successfully, copies the firmware into buffers * for the card to fetch (via DMA). */ static void iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context) { struct iwl_priv *priv = context; struct iwl_ucode_header *ucode; int err; struct iwl4965_firmware_pieces pieces; const unsigned int api_max = priv->cfg->ucode_api_max; const unsigned int api_min = priv->cfg->ucode_api_min; u32 api_ver; u32 max_probe_length = 200; u32 standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; memset(&pieces, 0, sizeof(pieces)); if (!ucode_raw) { if (priv->fw_index <= priv->cfg->ucode_api_max) IWL_ERR(priv, "request for firmware file '%s' failed.\n", priv->firmware_name); goto try_again; } IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", priv->firmware_name, ucode_raw->size); /* Make sure that we got at least the API version number */ if (ucode_raw->size < 4) { IWL_ERR(priv, "File size way too small!\n"); goto try_again; } /* Data from ucode file: header followed by uCode images */ ucode = (struct iwl_ucode_header *)ucode_raw->data; err = iwl4965_load_firmware(priv, ucode_raw, &pieces); if (err) goto try_again; api_ver = IWL_UCODE_API(priv->ucode_ver); /* * api_ver should match the api version forming part of the * firmware filename ... 
but we don't check for that and only rely * on the API version read from firmware header from here on forward */ if (api_ver < api_min || api_ver > api_max) { IWL_ERR(priv, "Driver unable to support your firmware API. " "Driver supports v%u, firmware is v%u.\n", api_max, api_ver); goto try_again; } if (api_ver != api_max) IWL_ERR(priv, "Firmware has old API version. Expected v%u, " "got v%u. New firmware can be obtained " "from http://www.intellinuxwireless.org.\n", api_max, api_ver); IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", IWL_UCODE_MAJOR(priv->ucode_ver), IWL_UCODE_MINOR(priv->ucode_ver), IWL_UCODE_API(priv->ucode_ver), IWL_UCODE_SERIAL(priv->ucode_ver)); snprintf(priv->hw->wiphy->fw_version, sizeof(priv->hw->wiphy->fw_version), "%u.%u.%u.%u", IWL_UCODE_MAJOR(priv->ucode_ver), IWL_UCODE_MINOR(priv->ucode_ver), IWL_UCODE_API(priv->ucode_ver), IWL_UCODE_SERIAL(priv->ucode_ver)); /* * For any of the failures below (before allocating pci memory) * we will try to load a version with a smaller API -- maybe the * user just got a corrupted version of the latest API. 
*/ IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", priv->ucode_ver); IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n", pieces.inst_size); IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n", pieces.data_size); IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n", pieces.init_size); IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n", pieces.init_data_size); IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n", pieces.boot_size); /* Verify that uCode images will fit in card's SRAM */ if (pieces.inst_size > priv->hw_params.max_inst_size) { IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n", pieces.inst_size); goto try_again; } if (pieces.data_size > priv->hw_params.max_data_size) { IWL_ERR(priv, "uCode data len %Zd too large to fit in\n", pieces.data_size); goto try_again; } if (pieces.init_size > priv->hw_params.max_inst_size) { IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n", pieces.init_size); goto try_again; } if (pieces.init_data_size > priv->hw_params.max_data_size) { IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n", pieces.init_data_size); goto try_again; } if (pieces.boot_size > priv->hw_params.max_bsm_size) { IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n", pieces.boot_size); goto try_again; } /* Allocate ucode buffers for card's bus-master loading ... 
*/ /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ priv->ucode_code.len = pieces.inst_size; iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); priv->ucode_data.len = pieces.data_size; iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); priv->ucode_data_backup.len = pieces.data_size; iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || !priv->ucode_data_backup.v_addr) goto err_pci_alloc; /* Initialization instructions and data */ if (pieces.init_size && pieces.init_data_size) { priv->ucode_init.len = pieces.init_size; iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); priv->ucode_init_data.len = pieces.init_data_size; iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) goto err_pci_alloc; } /* Bootstrap (instructions only, no data) */ if (pieces.boot_size) { priv->ucode_boot.len = pieces.boot_size; iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); if (!priv->ucode_boot.v_addr) goto err_pci_alloc; } /* Now that we can no longer fail, copy information */ priv->sta_key_max_num = STA_KEY_MAX_NUM; /* Copy images into buffers for card's bus-master reads ... 
*/ /* Runtime instructions (first block of data in file) */ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", pieces.inst_size); memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size); IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); /* * Runtime data * NOTE: Copy into backup buffer will be done in iwl_up() */ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", pieces.data_size); memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size); memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size); /* Initialization instructions */ if (pieces.init_size) { IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n", pieces.init_size); memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size); } /* Initialization data */ if (pieces.init_data_size) { IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n", pieces.init_data_size); memcpy(priv->ucode_init_data.v_addr, pieces.init_data, pieces.init_data_size); } /* Bootstrap instructions */ IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", pieces.boot_size); memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); /* * figure out the offset of chain noise reset and gain commands * base on the size of standard phy calibration commands table size */ priv->_4965.phy_calib_chain_noise_reset_cmd = standard_phy_calibration_size; priv->_4965.phy_calib_chain_noise_gain_cmd = standard_phy_calibration_size + 1; /************************************************** * This is still part of probe() in a sense... * * 9. Setup and register with mac80211 and debugfs **************************************************/ err = iwl4965_mac_setup_register(priv, max_probe_length); if (err) goto out_unbind; err = iwl_legacy_dbgfs_register(priv, DRV_NAME); if (err) IWL_ERR(priv, "failed to create debugfs files. 
Ignoring error: %d\n", err); err = sysfs_create_group(&priv->pci_dev->dev.kobj, &iwl_attribute_group); if (err) { IWL_ERR(priv, "failed to create sysfs device attributes\n"); goto out_unbind; } /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); complete(&priv->_4965.firmware_loading_complete); return; try_again: /* try next, if any */ if (iwl4965_request_firmware(priv, false)) goto out_unbind; release_firmware(ucode_raw); return; err_pci_alloc: IWL_ERR(priv, "failed to allocate pci memory\n"); iwl4965_dealloc_ucode_pci(priv); out_unbind: complete(&priv->_4965.firmware_loading_complete); device_release_driver(&priv->pci_dev->dev); release_firmware(ucode_raw); } static const char * const desc_lookup_text[] = { "OK", "FAIL", "BAD_PARAM", "BAD_CHECKSUM", "NMI_INTERRUPT_WDG", "SYSASSERT", "FATAL_ERROR", "BAD_COMMAND", "HW_ERROR_TUNE_LOCK", "HW_ERROR_TEMPERATURE", "ILLEGAL_CHAN_FREQ", "VCC_NOT_STABLE", "FH_ERROR", "NMI_INTERRUPT_HOST", "NMI_INTERRUPT_ACTION_PT", "NMI_INTERRUPT_UNKNOWN", "UCODE_VERSION_MISMATCH", "HW_ERROR_ABS_LOCK", "HW_ERROR_CAL_LOCK_FAIL", "NMI_INTERRUPT_INST_ACTION_PT", "NMI_INTERRUPT_DATA_ACTION_PT", "NMI_TRM_HW_ER", "NMI_INTERRUPT_TRM", "NMI_INTERRUPT_BREAK_POINT" "DEBUG_0", "DEBUG_1", "DEBUG_2", "DEBUG_3", }; static struct { char *name; u8 num; } advanced_lookup[] = { { "NMI_INTERRUPT_WDG", 0x34 }, { "SYSASSERT", 0x35 }, { "UCODE_VERSION_MISMATCH", 0x37 }, { "BAD_COMMAND", 0x38 }, { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, { "FATAL_ERROR", 0x3D }, { "NMI_TRM_HW_ERR", 0x46 }, { "NMI_INTERRUPT_TRM", 0x4C }, { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, { "NMI_INTERRUPT_HOST", 0x66 }, { "NMI_INTERRUPT_ACTION_PT", 0x7C }, { "NMI_INTERRUPT_UNKNOWN", 0x84 }, { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, { "ADVANCED_SYSASSERT", 0 }, }; static const char *iwl4965_desc_lookup(u32 num) { int i; int max = ARRAY_SIZE(desc_lookup_text); if (num < max) 
return desc_lookup_text[num]; max = ARRAY_SIZE(advanced_lookup) - 1; for (i = 0; i < max; i++) { if (advanced_lookup[i].num == num) break; } return advanced_lookup[i].name; } #define ERROR_START_OFFSET (1 * sizeof(u32)) #define ERROR_ELEM_SIZE (7 * sizeof(u32)) void iwl4965_dump_nic_error_log(struct iwl_priv *priv) { u32 data2, line; u32 desc, time, count, base, data1; u32 blink1, blink2, ilink1, ilink2; u32 pc, hcmd; if (priv->ucode_type == UCODE_INIT) { base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); } else { base = le32_to_cpu(priv->card_alive.error_event_table_ptr); } if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { IWL_ERR(priv, "Not valid error log pointer 0x%08X for %s uCode\n", base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); return; } count = iwl_legacy_read_targ_mem(priv, base); if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { IWL_ERR(priv, "Start IWL Error Log Dump:\n"); IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", priv->status, count); } desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32)); priv->isr_stats.err_code = desc; pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32)); blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32)); blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32)); ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32)); ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32)); data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32)); data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32)); line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32)); time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32)); hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32)); trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, data2, line, blink1, blink2, ilink1, ilink2); IWL_ERR(priv, "Desc Time " "data1 data2 line\n"); IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", iwl4965_desc_lookup(desc), desc, time, data1, 
data2, line); IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1, blink2, ilink1, ilink2, hcmd); } #define EVENT_START_OFFSET (4 * sizeof(u32)) /** * iwl4965_print_event_log - Dump error event log to syslog * */ static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx, u32 num_events, u32 mode, int pos, char **buf, size_t bufsz) { u32 i; u32 base; /* SRAM byte address of event log header */ u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ u32 ptr; /* SRAM byte address of log data */ u32 ev, time, data; /* event log data */ unsigned long reg_flags; if (num_events == 0) return pos; if (priv->ucode_type == UCODE_INIT) { base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); } else { base = le32_to_cpu(priv->card_alive.log_event_table_ptr); } if (mode == 0) event_size = 2 * sizeof(u32); else event_size = 3 * sizeof(u32); ptr = base + EVENT_START_OFFSET + (start_idx * event_size); /* Make sure device is powered up for SRAM reads */ spin_lock_irqsave(&priv->reg_lock, reg_flags); iwl_grab_nic_access(priv); /* Set starting address; reads will auto-increment */ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); rmb(); /* "time" is actually "data" for mode 0 (no timestamp). * place event id # at far right for easier visual parsing. 
*/ for (i = 0; i < num_events; i++) { ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); if (mode == 0) { /* data, ev */ if (bufsz) { pos += scnprintf(*buf + pos, bufsz - pos, "EVT_LOG:0x%08x:%04u\n", time, ev); } else { trace_iwlwifi_legacy_dev_ucode_event(priv, 0, time, ev); IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev); } } else { data = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); if (bufsz) { pos += scnprintf(*buf + pos, bufsz - pos, "EVT_LOGT:%010u:0x%08x:%04u\n", time, data, ev); } else { IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", time, data, ev); trace_iwlwifi_legacy_dev_ucode_event(priv, time, data, ev); } } } /* Allow device to power down */ iwl_release_nic_access(priv); spin_unlock_irqrestore(&priv->reg_lock, reg_flags); return pos; } /** * iwl4965_print_last_event_logs - Dump the newest # of event log to syslog */ static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity, u32 num_wraps, u32 next_entry, u32 size, u32 mode, int pos, char **buf, size_t bufsz) { /* * display the newest DEFAULT_LOG_ENTRIES entries * i.e the entries just before the next ont that uCode would fill. 
*/ if (num_wraps) { if (next_entry < size) { pos = iwl4965_print_event_log(priv, capacity - (size - next_entry), size - next_entry, mode, pos, buf, bufsz); pos = iwl4965_print_event_log(priv, 0, next_entry, mode, pos, buf, bufsz); } else pos = iwl4965_print_event_log(priv, next_entry - size, size, mode, pos, buf, bufsz); } else { if (next_entry < size) { pos = iwl4965_print_event_log(priv, 0, next_entry, mode, pos, buf, bufsz); } else { pos = iwl4965_print_event_log(priv, next_entry - size, size, mode, pos, buf, bufsz); } } return pos; } #define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log, char **buf, bool display) { u32 base; /* SRAM byte address of event log header */ u32 capacity; /* event log capacity in # entries */ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ u32 num_wraps; /* # times uCode wrapped to top of log */ u32 next_entry; /* index of next entry to be written by uCode */ u32 size; /* # entries that we'll print */ int pos = 0; size_t bufsz = 0; if (priv->ucode_type == UCODE_INIT) { base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); } else { base = le32_to_cpu(priv->card_alive.log_event_table_ptr); } if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { IWL_ERR(priv, "Invalid event log pointer 0x%08X for %s uCode\n", base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); return -EINVAL; } /* event log header */ capacity = iwl_legacy_read_targ_mem(priv, base); mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32))); next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32))); size = num_wraps ? 
capacity : next_entry; /* bail out if nothing in log */ if (size == 0) { IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); return pos; } #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; #else size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; #endif IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", size); #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG if (display) { if (full_log) bufsz = capacity * 48; else bufsz = size * 48; *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; } if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { /* * if uCode has wrapped back to top of log, * start at the oldest entry, * i.e the next one that uCode would fill. */ if (num_wraps) pos = iwl4965_print_event_log(priv, next_entry, capacity - next_entry, mode, pos, buf, bufsz); /* (then/else) start at top of log */ pos = iwl4965_print_event_log(priv, 0, next_entry, mode, pos, buf, bufsz); } else pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps, next_entry, size, mode, pos, buf, bufsz); #else pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps, next_entry, size, mode, pos, buf, bufsz); #endif return pos; } static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) { struct iwl_ct_kill_config cmd; unsigned long flags; int ret = 0; spin_lock_irqsave(&priv->lock, flags); iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); spin_unlock_irqrestore(&priv->lock, flags); cmd.critical_temperature_R = cpu_to_le32(priv->hw_params.ct_kill_threshold); ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd); if (ret) IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); else IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " "succeeded, " "critical temperature is %d\n", 
priv->hw_params.ct_kill_threshold); } static const s8 default_queue_to_tx_fifo[] = { IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK, IWL49_CMD_FIFO_NUM, IWL_TX_FIFO_UNUSED, IWL_TX_FIFO_UNUSED, }; static int iwl4965_alive_notify(struct iwl_priv *priv) { u32 a; unsigned long flags; int i, chan; u32 reg_val; spin_lock_irqsave(&priv->lock, flags); /* Clear 4965's internal Tx Scheduler data base */ priv->scd_base_addr = iwl_legacy_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET; for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) iwl_legacy_write_targ_mem(priv, a, 0); for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) iwl_legacy_write_targ_mem(priv, a, 0); for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) iwl_legacy_write_targ_mem(priv, a, 0); /* Tel 4965 where to find Tx byte count tables */ iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, priv->scd_bc_tbls.dma >> 10); /* Enable DMA channel */ for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++) iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); /* Update FH chicken bits */ reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Disable chain mode for all queues */ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); /* Initialize each Tx queue (including the command queue) */ for (i = 0; i < priv->hw_params.max_txq_num; i++) { /* TFD circular buffer read/write indexes */ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0); iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); /* Max Tx Window size for Scheduler-ACK mode */ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i), 
(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); /* Frame limit */ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof(u32), (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); } iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << priv->hw_params.max_txq_num) - 1); /* Activate all Tx DMA/FIFO channels */ iwl4965_txq_set_sched(priv, IWL_MASK(0, 6)); iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0); /* make sure all queue are not stopped */ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); for (i = 0; i < 4; i++) atomic_set(&priv->queue_stop_count[i], 0); /* reset to 0 to enable all the queue first */ priv->txq_ctx_active_msk = 0; /* Map each Tx/cmd queue to its corresponding fifo */ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { int ac = default_queue_to_tx_fifo[i]; iwl_txq_ctx_activate(priv, i); if (ac == IWL_TX_FIFO_UNUSED) continue; iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); } spin_unlock_irqrestore(&priv->lock, flags); return 0; } /** * iwl4965_alive_start - called after REPLY_ALIVE notification received * from protocol/runtime uCode (initialization uCode's * Alive gets handled by iwl_init_alive_start()). */ static void iwl4965_alive_start(struct iwl_priv *priv) { int ret = 0; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); if (priv->card_alive.is_valid != UCODE_VALID_OK) { /* We had an error bringing up the hardware, so take it * all the way back down so we can try again */ IWL_DEBUG_INFO(priv, "Alive failed.\n"); goto restart; } /* Initialize uCode has loaded Runtime uCode ... verify inst image. * This is a paranoid check, because we would not have gotten the * "runtime" alive if code weren't properly loaded. 
*/ if (iwl4965_verify_ucode(priv)) { /* Runtime instruction load was bad; * take it all the way back down so we can try again */ IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); goto restart; } ret = iwl4965_alive_notify(priv); if (ret) { IWL_WARN(priv, "Could not complete ALIVE transition [ntf]: %d\n", ret); goto restart; } /* After the ALIVE response, we can send host commands to the uCode */ set_bit(STATUS_ALIVE, &priv->status); /* Enable watchdog to monitor the driver tx queues */ iwl_legacy_setup_watchdog(priv); if (iwl_legacy_is_rfkill(priv)) return; ieee80211_wake_queues(priv->hw); priv->active_rate = IWL_RATES_MASK; if (iwl_legacy_is_associated_ctx(ctx)) { struct iwl_legacy_rxon_cmd *active_rxon = (struct iwl_legacy_rxon_cmd *)&ctx->active; /* apply any changes in staging */ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; } else { struct iwl_rxon_context *tmp; /* Initialize our rx_config data */ for_each_context(priv, tmp) iwl_legacy_connection_init_rx_config(priv, tmp); if (priv->cfg->ops->hcmd->set_rxon_chain) priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); } /* Configure bluetooth coexistence if enabled */ iwl_legacy_send_bt_config(priv); iwl4965_reset_run_time_calib(priv); set_bit(STATUS_READY, &priv->status); /* Configure the adapter for unassociated operation */ iwl_legacy_commit_rxon(priv, ctx); /* At this point, the NIC is initialized and operational */ iwl4965_rf_kill_ct_config(priv); IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); wake_up(&priv->wait_command_queue); iwl_legacy_power_update_mode(priv, true); IWL_DEBUG_INFO(priv, "Updated power mode\n"); return; restart: queue_work(priv->workqueue, &priv->restart); } static void iwl4965_cancel_deferred_work(struct iwl_priv *priv); static void __iwl4965_down(struct iwl_priv *priv) { unsigned long flags; int exit_pending; IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); iwl_legacy_scan_cancel_timeout(priv, 200); exit_pending = 
test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set * to prevent rearm timer */ del_timer_sync(&priv->watchdog); iwl_legacy_clear_ucode_stations(priv, NULL); iwl_legacy_dealloc_bcast_stations(priv); iwl_legacy_clear_driver_stations(priv); /* Unblock any waiting calls */ wake_up_all(&priv->wait_command_queue); /* Wipe out the EXIT_PENDING status bit if we are not actually * exiting the module */ if (!exit_pending) clear_bit(STATUS_EXIT_PENDING, &priv->status); /* stop and reset the on-board processor */ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); /* tell the device to stop sending interrupts */ spin_lock_irqsave(&priv->lock, flags); iwl_legacy_disable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); iwl4965_synchronize_irq(priv); if (priv->mac80211_registered) ieee80211_stop_queues(priv->hw); /* If we have not previously called iwl_init() then * clear all bits but the RF Kill bit and return */ if (!iwl_legacy_is_init(priv)) { priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << STATUS_RF_KILL_HW | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << STATUS_GEO_CONFIGURED | test_bit(STATUS_EXIT_PENDING, &priv->status) << STATUS_EXIT_PENDING; goto exit; } /* ...otherwise clear out all the status bits but the RF Kill * bit and continue taking the NIC down. 
*/ priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << STATUS_RF_KILL_HW | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << STATUS_GEO_CONFIGURED | test_bit(STATUS_FW_ERROR, &priv->status) << STATUS_FW_ERROR | test_bit(STATUS_EXIT_PENDING, &priv->status) << STATUS_EXIT_PENDING; iwl4965_txq_ctx_stop(priv); iwl4965_rxq_stop(priv); /* Power-down device's busmaster DMA clocks */ iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); /* Make sure (redundant) we've released our request to stay awake */ iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* Stop the device, and put it in low power state */ iwl_legacy_apm_stop(priv); exit: memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); dev_kfree_skb(priv->beacon_skb); priv->beacon_skb = NULL; /* clear out any free frames */ iwl4965_clear_free_frames(priv); } static void iwl4965_down(struct iwl_priv *priv) { mutex_lock(&priv->mutex); __iwl4965_down(priv); mutex_unlock(&priv->mutex); iwl4965_cancel_deferred_work(priv); } #define HW_READY_TIMEOUT (50) static int iwl4965_set_hw_ready(struct iwl_priv *priv) { int ret = 0; iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); /* See if we got it */ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); if (ret != -ETIMEDOUT) priv->hw_ready = true; else priv->hw_ready = false; IWL_DEBUG_INFO(priv, "hardware %s\n", (priv->hw_ready == 1) ? 
"ready" : "not ready"); return ret; } static int iwl4965_prepare_card_hw(struct iwl_priv *priv) { int ret = 0; IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n"); ret = iwl4965_set_hw_ready(priv); if (priv->hw_ready) return ret; /* If HW is not ready, prepare the conditions to check again */ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); /* HW should be ready by now, check again. */ if (ret != -ETIMEDOUT) iwl4965_set_hw_ready(priv); return ret; } #define MAX_HW_RESTARTS 5 static int __iwl4965_up(struct iwl_priv *priv) { struct iwl_rxon_context *ctx; int i; int ret; if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); return -EIO; } if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { IWL_ERR(priv, "ucode not available for device bringup\n"); return -EIO; } for_each_context(priv, ctx) { ret = iwl4965_alloc_bcast_station(priv, ctx); if (ret) { iwl_legacy_dealloc_bcast_stations(priv); return ret; } } iwl4965_prepare_card_hw(priv); if (!priv->hw_ready) { IWL_WARN(priv, "Exit HW not ready\n"); return -EIO; } /* If platform's RF_KILL switch is NOT set to KILL */ if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) clear_bit(STATUS_RF_KILL_HW, &priv->status); else set_bit(STATUS_RF_KILL_HW, &priv->status); if (iwl_legacy_is_rfkill(priv)) { wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); iwl_legacy_enable_interrupts(priv); IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); return 0; } iwl_write32(priv, CSR_INT, 0xFFFFFFFF); /* must be initialised before iwl_hw_nic_init */ priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; ret = iwl4965_hw_nic_init(priv); if (ret) { IWL_ERR(priv, "Unable to init nic\n"); return ret; } /* make sure rfkill handshake bits are cleared */ iwl_write32(priv, 
CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); /* clear (again), then enable host interrupts */ iwl_write32(priv, CSR_INT, 0xFFFFFFFF); iwl_legacy_enable_interrupts(priv); /* really make sure rfkill handshake bits are cleared */ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); /* Copy original ucode data image from disk into backup cache. * This will be used to initialize the on-board processor's * data SRAM for a clean start when the runtime program first loads. */ memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, priv->ucode_data.len); for (i = 0; i < MAX_HW_RESTARTS; i++) { /* load bootstrap state machine, * load bootstrap program into processor's memory, * prepare to load the "initialize" uCode */ ret = priv->cfg->ops->lib->load_ucode(priv); if (ret) { IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", ret); continue; } /* start card; "initialize" will load runtime ucode */ iwl4965_nic_start(priv); IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); return 0; } set_bit(STATUS_EXIT_PENDING, &priv->status); __iwl4965_down(priv); clear_bit(STATUS_EXIT_PENDING, &priv->status); /* tried to restart and config the device for as long as our * patience could withstand */ IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); return -EIO; } /***************************************************************************** * * Workqueue callbacks * *****************************************************************************/ static void iwl4965_bg_init_alive_start(struct work_struct *data) { struct iwl_priv *priv = container_of(data, struct iwl_priv, init_alive_start.work); mutex_lock(&priv->mutex); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) goto out; priv->cfg->ops->lib->init_alive_start(priv); out: mutex_unlock(&priv->mutex); } static void iwl4965_bg_alive_start(struct 
work_struct *data) { struct iwl_priv *priv = container_of(data, struct iwl_priv, alive_start.work); mutex_lock(&priv->mutex); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) goto out; iwl4965_alive_start(priv); out: mutex_unlock(&priv->mutex); } static void iwl4965_bg_run_time_calib_work(struct work_struct *work) { struct iwl_priv *priv = container_of(work, struct iwl_priv, run_time_calib_work); mutex_lock(&priv->mutex); if (test_bit(STATUS_EXIT_PENDING, &priv->status) || test_bit(STATUS_SCANNING, &priv->status)) { mutex_unlock(&priv->mutex); return; } if (priv->start_calib) { iwl4965_chain_noise_calibration(priv, (void *)&priv->_4965.statistics); iwl4965_sensitivity_calibration(priv, (void *)&priv->_4965.statistics); } mutex_unlock(&priv->mutex); } static void iwl4965_bg_restart(struct work_struct *data) { struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { struct iwl_rxon_context *ctx; mutex_lock(&priv->mutex); for_each_context(priv, ctx) ctx->vif = NULL; priv->is_open = 0; __iwl4965_down(priv); mutex_unlock(&priv->mutex); iwl4965_cancel_deferred_work(priv); ieee80211_restart_hw(priv->hw); } else { iwl4965_down(priv); mutex_lock(&priv->mutex); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { mutex_unlock(&priv->mutex); return; } __iwl4965_up(priv); mutex_unlock(&priv->mutex); } } static void iwl4965_bg_rx_replenish(struct work_struct *data) { struct iwl_priv *priv = container_of(data, struct iwl_priv, rx_replenish); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; mutex_lock(&priv->mutex); iwl4965_rx_replenish(priv); mutex_unlock(&priv->mutex); } /***************************************************************************** * * mac80211 entry point functions * *****************************************************************************/ #define UCODE_READY_TIMEOUT (4 * HZ) /* * Not a mac80211 entry point function, 
but it fits in with all the * other mac80211 functions grouped here. */ static int iwl4965_mac_setup_register(struct iwl_priv *priv, u32 max_probe_length) { int ret; struct ieee80211_hw *hw = priv->hw; struct iwl_rxon_context *ctx; hw->rate_control_algorithm = "iwl-4965-rs"; /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_REPORTS_TX_ACK_STATUS; if (priv->cfg->sku & IWL_SKU_N) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_SUPPORTS_STATIC_SMPS; hw->sta_data_size = sizeof(struct iwl_station_priv); hw->vif_data_size = sizeof(struct iwl_vif_priv); for_each_context(priv, ctx) { hw->wiphy->interface_modes |= ctx->interface_modes; hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; } hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; /* * For now, disable PS by default because it affects * RX performance significantly. 
*/ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; /* we create the 802.11 header and a zero-length SSID element */ hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; /* Default value; 4 EDCA QOS priorities */ hw->queues = 4; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->bands[IEEE80211_BAND_2GHZ]; if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->bands[IEEE80211_BAND_5GHZ]; iwl_legacy_leds_init(priv); ret = ieee80211_register_hw(priv->hw); if (ret) { IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); return ret; } priv->mac80211_registered = 1; return 0; } int iwl4965_mac_start(struct ieee80211_hw *hw) { struct iwl_priv *priv = hw->priv; int ret; IWL_DEBUG_MAC80211(priv, "enter\n"); /* we should be verifying the device is ready to be opened */ mutex_lock(&priv->mutex); ret = __iwl4965_up(priv); mutex_unlock(&priv->mutex); if (ret) return ret; if (iwl_legacy_is_rfkill(priv)) goto out; IWL_DEBUG_INFO(priv, "Start UP work done.\n"); /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from * mac80211 will not be run successfully. 
*/ ret = wait_event_timeout(priv->wait_command_queue, test_bit(STATUS_READY, &priv->status), UCODE_READY_TIMEOUT); if (!ret) { if (!test_bit(STATUS_READY, &priv->status)) { IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", jiffies_to_msecs(UCODE_READY_TIMEOUT)); return -ETIMEDOUT; } } iwl4965_led_enable(priv); out: priv->is_open = 1; IWL_DEBUG_MAC80211(priv, "leave\n"); return 0; } void iwl4965_mac_stop(struct ieee80211_hw *hw) { struct iwl_priv *priv = hw->priv; IWL_DEBUG_MAC80211(priv, "enter\n"); if (!priv->is_open) return; priv->is_open = 0; iwl4965_down(priv); flush_workqueue(priv->workqueue); /* User space software may expect getting rfkill changes * even if interface is down */ iwl_write32(priv, CSR_INT, 0xFFFFFFFF); iwl_legacy_enable_rfkill_int(priv); IWL_DEBUG_MAC80211(priv, "leave\n"); } void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct iwl_priv *priv = hw->priv; IWL_DEBUG_MACDUMP(priv, "enter\n"); IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); if (iwl4965_tx_skb(priv, skb)) dev_kfree_skb_any(skb); IWL_DEBUG_MACDUMP(priv, "leave\n"); } void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_priv *priv = hw->priv; struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; IWL_DEBUG_MAC80211(priv, "enter\n"); iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta, iv32, phase1key); IWL_DEBUG_MAC80211(priv, "leave\n"); } int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_priv *priv = hw->priv; struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *ctx = vif_priv->ctx; int ret; u8 sta_id; bool is_default_wep_key = false; IWL_DEBUG_MAC80211(priv, "enter\n"); if 
(priv->cfg->mod_params->sw_crypto) { IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); return -EOPNOTSUPP; } sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta); if (sta_id == IWL_INVALID_STATION) return -EINVAL; mutex_lock(&priv->mutex); iwl_legacy_scan_cancel_timeout(priv, 100); /* * If we are getting WEP group key and we didn't receive any key mapping * so far, we are in legacy wep mode (group key only), otherwise we are * in 1X mode. * In legacy wep mode, we use another host command to the uCode. */ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { if (cmd == SET_KEY) is_default_wep_key = !ctx->key_mapping_keys; else is_default_wep_key = (key->hw_key_idx == HW_KEY_DEFAULT); } switch (cmd) { case SET_KEY: if (is_default_wep_key) ret = iwl4965_set_default_wep_key(priv, vif_priv->ctx, key); else ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx, key, sta_id); IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); break; case DISABLE_KEY: if (is_default_wep_key) ret = iwl4965_remove_default_wep_key(priv, ctx, key); else ret = iwl4965_remove_dynamic_key(priv, ctx, key, sta_id); IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); break; default: ret = -EINVAL; } mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return ret; } int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) { struct iwl_priv *priv = hw->priv; int ret = -EINVAL; IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", sta->addr, tid); if (!(priv->cfg->sku & IWL_SKU_N)) return -EACCES; mutex_lock(&priv->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: IWL_DEBUG_HT(priv, "start Rx\n"); ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn); break; case IEEE80211_AMPDU_RX_STOP: IWL_DEBUG_HT(priv, "stop Rx\n"); ret = iwl4965_sta_rx_agg_stop(priv, sta, tid); if (test_bit(STATUS_EXIT_PENDING, 
&priv->status)) ret = 0; break; case IEEE80211_AMPDU_TX_START: IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn); if (ret == 0) { priv->_4965.agg_tids_count++; IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n", priv->_4965.agg_tids_count); } break; case IEEE80211_AMPDU_TX_STOP: IWL_DEBUG_HT(priv, "stop Tx\n"); ret = iwl4965_tx_agg_stop(priv, vif, sta, tid); if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) { priv->_4965.agg_tids_count--; IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n", priv->_4965.agg_tids_count); } if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ret = 0; break; case IEEE80211_AMPDU_TX_OPERATIONAL: ret = 0; break; } mutex_unlock(&priv->mutex); return ret; } int iwl4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_priv *priv = hw->priv; struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; bool is_ap = vif->type == NL80211_IFTYPE_STATION; int ret; u8 sta_id; IWL_DEBUG_INFO(priv, "received request to add station %pM\n", sta->addr); mutex_lock(&priv->mutex); IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", sta->addr); sta_priv->common.sta_id = IWL_INVALID_STATION; atomic_set(&sta_priv->pending_frames, 0); ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr, is_ap, sta, &sta_id); if (ret) { IWL_ERR(priv, "Unable to add station %pM (%d)\n", sta->addr, ret); /* Should we return success if return code is EEXIST ? 
*/ mutex_unlock(&priv->mutex); return ret; } sta_priv->common.sta_id = sta_id; /* Initialize rate scaling */ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", sta->addr); iwl4965_rs_rate_init(priv, sta, sta_id); mutex_unlock(&priv->mutex); return 0; } void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_channel_switch *ch_switch) { struct iwl_priv *priv = hw->priv; const struct iwl_channel_info *ch_info; struct ieee80211_conf *conf = &hw->conf; struct ieee80211_channel *channel = ch_switch->channel; struct iwl_ht_config *ht_conf = &priv->current_ht_config; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; u16 ch; unsigned long flags = 0; IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); if (iwl_legacy_is_rfkill(priv)) goto out; if (test_bit(STATUS_EXIT_PENDING, &priv->status) || test_bit(STATUS_SCANNING, &priv->status) || test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) goto out; if (!iwl_legacy_is_associated_ctx(ctx)) goto out; if (priv->cfg->ops->lib->set_channel_switch) { ch = channel->hw_value; if (le16_to_cpu(ctx->active.channel) != ch) { ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); if (!iwl_legacy_is_channel_valid(ch_info)) { IWL_DEBUG_MAC80211(priv, "invalid channel\n"); goto out; } spin_lock_irqsave(&priv->lock, flags); priv->current_ht_config.smps = conf->smps_mode; /* Configure HT40 channels */ ctx->ht.enabled = conf_is_ht(conf); if (ctx->ht.enabled) { if (conf_is_ht40_minus(conf)) { ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; ctx->ht.is_40mhz = true; } else if (conf_is_ht40_plus(conf)) { ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; ctx->ht.is_40mhz = true; } else { ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; ctx->ht.is_40mhz = false; } } else ctx->ht.is_40mhz = false; if ((le16_to_cpu(ctx->staging.channel) != ch)) ctx->staging.flags = 0; iwl_legacy_set_rxon_channel(priv, channel, ctx); 
iwl_legacy_set_rxon_ht(priv, ht_conf); iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif); spin_unlock_irqrestore(&priv->lock, flags); iwl_legacy_set_rate(priv); /* * at this point, staging_rxon has the * configuration for channel switch */ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = cpu_to_le16(ch); if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) { clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); priv->switch_channel = 0; ieee80211_chswitch_done(ctx->vif, false); } } } out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); } void iwl4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct iwl_priv *priv = hw->priv; __le32 filter_or = 0, filter_nand = 0; struct iwl_rxon_context *ctx; #define CHK(test, flag) do { \ if (*total_flags & (test)) \ filter_or |= (flag); \ else \ filter_nand |= (flag); \ } while (0) IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", changed_flags, *total_flags); CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); #undef CHK mutex_lock(&priv->mutex); for_each_context(priv, ctx) { ctx->staging.filter_flags &= ~filter_nand; ctx->staging.filter_flags |= filter_or; /* * Not committing directly because hardware can perform a scan, * but we'll eventually commit the filter flags change anyway. */ } mutex_unlock(&priv->mutex); /* * Receiving all multicast frames is always enabled by the * default flags setup in iwl_legacy_connection_init_rx_config() * since we currently do not support programming multicast * filters into the device. 
*/ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; } /***************************************************************************** * * driver setup and teardown * *****************************************************************************/ static void iwl4965_bg_txpower_work(struct work_struct *work) { struct iwl_priv *priv = container_of(work, struct iwl_priv, txpower_work); mutex_lock(&priv->mutex); /* If a scan happened to start before we got here * then just return; the statistics notification will * kick off another scheduled work to compensate for * any temperature delta we missed here. */ if (test_bit(STATUS_EXIT_PENDING, &priv->status) || test_bit(STATUS_SCANNING, &priv->status)) goto out; /* Regardless of if we are associated, we must reconfigure the * TX power since frames can be sent on non-radar channels while * not associated */ priv->cfg->ops->lib->send_tx_power(priv); /* Update last_temperature to keep is_calib_needed from running * when it isn't needed... 
*/ priv->last_temperature = priv->temperature; out: mutex_unlock(&priv->mutex); } static void iwl4965_setup_deferred_work(struct iwl_priv *priv) { priv->workqueue = create_singlethread_workqueue(DRV_NAME); init_waitqueue_head(&priv->wait_command_queue); INIT_WORK(&priv->restart, iwl4965_bg_restart); INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish); INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work); INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); iwl_legacy_setup_scan_deferred_work(priv); INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); init_timer(&priv->statistics_periodic); priv->statistics_periodic.data = (unsigned long)priv; priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; init_timer(&priv->ucode_trace); priv->ucode_trace.data = (unsigned long)priv; priv->ucode_trace.function = iwl4965_bg_ucode_trace; init_timer(&priv->watchdog); priv->watchdog.data = (unsigned long)priv; priv->watchdog.function = iwl_legacy_bg_watchdog; tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) iwl4965_irq_tasklet, (unsigned long)priv); } static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) { cancel_work_sync(&priv->txpower_work); cancel_delayed_work_sync(&priv->init_alive_start); cancel_delayed_work(&priv->alive_start); cancel_work_sync(&priv->run_time_calib_work); iwl_legacy_cancel_scan_deferred_work(priv); del_timer_sync(&priv->statistics_periodic); del_timer_sync(&priv->ucode_trace); } static void iwl4965_init_hw_rates(struct iwl_priv *priv, struct ieee80211_rate *rates) { int i; for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { rates[i].bitrate = iwlegacy_rates[i].ieee * 5; rates[i].hw_value = i; /* Rate scaling will work on indexes */ rates[i].hw_value_short = i; rates[i].flags = 0; if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { /* * If CCK != 1M then set short preamble rate flag. 
*/ rates[i].flags |= (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE; } } } /* * Acquire priv->lock before calling this function ! */ void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) { iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, (index & 0xff) | (txq_id << 8)); iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); } void iwl4965_tx_queue_set_status(struct iwl_priv *priv, struct iwl_tx_queue *txq, int tx_fifo_id, int scd_retry) { int txq_id = txq->q.id; /* Find out whether to activate Tx queue */ int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; /* Set up and activate */ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) | (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) | (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | IWL49_SCD_QUEUE_STTS_REG_MSK); txq->sched_retry = scd_retry; IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate", scd_retry ? 
"BA" : "AC", txq_id, tx_fifo_id); } static int iwl4965_init_drv(struct iwl_priv *priv) { int ret; spin_lock_init(&priv->sta_lock); spin_lock_init(&priv->hcmd_lock); INIT_LIST_HEAD(&priv->free_frames); mutex_init(&priv->mutex); priv->ieee_channels = NULL; priv->ieee_rates = NULL; priv->band = IEEE80211_BAND_2GHZ; priv->iw_mode = NL80211_IFTYPE_STATION; priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; priv->_4965.agg_tids_count = 0; /* initialize force reset */ priv->force_reset[IWL_RF_RESET].reset_duration = IWL_DELAY_NEXT_FORCE_RF_RESET; priv->force_reset[IWL_FW_RESET].reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD; /* Choose which receivers/antennas to use */ if (priv->cfg->ops->hcmd->set_rxon_chain) priv->cfg->ops->hcmd->set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]); iwl_legacy_init_scan_params(priv); ret = iwl_legacy_init_channel_map(priv); if (ret) { IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); goto err; } ret = iwl_legacy_init_geos(priv); if (ret) { IWL_ERR(priv, "initializing geos failed: %d\n", ret); goto err_free_channel_map; } iwl4965_init_hw_rates(priv, priv->ieee_rates); return 0; err_free_channel_map: iwl_legacy_free_channel_map(priv); err: return ret; } static void iwl4965_uninit_drv(struct iwl_priv *priv) { iwl4965_calib_free_results(priv); iwl_legacy_free_geos(priv); iwl_legacy_free_channel_map(priv); kfree(priv->scan_cmd); } static void iwl4965_hw_detect(struct iwl_priv *priv) { priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV); priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG); priv->rev_id = priv->pci_dev->revision; IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); } static int iwl4965_set_hw_params(struct iwl_priv *priv) { priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; if (priv->cfg->mod_params->amsdu_size_8K) priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K); else 
priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; if (priv->cfg->mod_params->disable_11n) priv->cfg->sku &= ~IWL_SKU_N; /* Device-specific setup */ return priv->cfg->ops->lib->set_hw_params(priv); } static const u8 iwl4965_bss_ac_to_fifo[] = { IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK, }; static const u8 iwl4965_bss_ac_to_queue[] = { 0, 1, 2, 3, }; static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err = 0, i; struct iwl_priv *priv; struct ieee80211_hw *hw; struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); unsigned long flags; u16 pci_cmd; /************************ * 1. Allocating HW data ************************/ hw = iwl_legacy_alloc_all(cfg); if (!hw) { err = -ENOMEM; goto out; } priv = hw->priv; /* At this point both hw and priv are allocated. */ /* * The default context is always valid, * more may be discovered when firmware * is loaded. 
*/ priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); for (i = 0; i < NUM_IWL_RXON_CTX; i++) priv->contexts[i].ctxid = i; priv->contexts[IWL_RXON_CTX_BSS].always_active = true; priv->contexts[IWL_RXON_CTX_BSS].is_active = true; priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo; priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue; priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC); priv->contexts[IWL_RXON_CTX_BSS].interface_modes = BIT(NL80211_IFTYPE_STATION); priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP; priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1); SET_IEEE80211_DEV(hw, &pdev->dev); IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); priv->cfg = cfg; priv->pci_dev = pdev; priv->inta_mask = CSR_INI_SET_MASK; if (iwl_legacy_alloc_traffic_mem(priv)) IWL_ERR(priv, "Not enough memory to generate traffic log\n"); /************************** * 2. 
Initializing PCI bus **************************/ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); if (pci_enable_device(pdev)) { err = -ENODEV; goto out_ieee80211_free_hw; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (err) { IWL_WARN(priv, "No suitable DMA available.\n"); goto out_pci_disable_device; } } err = pci_request_regions(pdev, DRV_NAME); if (err) goto out_pci_disable_device; pci_set_drvdata(pdev, priv); /*********************** * 3. Read REV register ***********************/ priv->hw_base = pci_iomap(pdev, 0, 0); if (!priv->hw_base) { err = -ENODEV; goto out_pci_release_regions; } IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", (unsigned long long) pci_resource_len(pdev, 0)); IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); /* these spin locks will be used in apm_ops.init and EEPROM access * we should init now */ spin_lock_init(&priv->reg_lock); spin_lock_init(&priv->lock); /* * stop and reset the on-board processor just in case it is in a * strange state ... like being left stranded by a primary kernel * and this is now the kdump kernel trying to start up */ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); iwl4965_hw_detect(priv); IWL_INFO(priv, "Detected %s, REV=0x%X\n", priv->cfg->name, priv->hw_rev); /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); iwl4965_prepare_card_hw(priv); if (!priv->hw_ready) { IWL_WARN(priv, "Failed, HW not ready\n"); goto out_iounmap; } /***************** * 4. 
Read EEPROM *****************/ /* Read the EEPROM */ err = iwl_legacy_eeprom_init(priv); if (err) { IWL_ERR(priv, "Unable to init EEPROM\n"); goto out_iounmap; } err = iwl4965_eeprom_check_version(priv); if (err) goto out_free_eeprom; if (err) goto out_free_eeprom; /* extract MAC Address */ iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr); IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); priv->hw->wiphy->addresses = priv->addresses; priv->hw->wiphy->n_addresses = 1; /************************ * 5. Setup HW constants ************************/ if (iwl4965_set_hw_params(priv)) { IWL_ERR(priv, "failed to set hw parameters\n"); goto out_free_eeprom; } /******************* * 6. Setup priv *******************/ err = iwl4965_init_drv(priv); if (err) goto out_free_eeprom; /* At this point both hw and priv are initialized. */ /******************** * 7. Setup services ********************/ spin_lock_irqsave(&priv->lock, flags); iwl_legacy_disable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); pci_enable_msi(priv->pci_dev); err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, IRQF_SHARED, DRV_NAME, priv); if (err) { IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); goto out_disable_msi; } iwl4965_setup_deferred_work(priv); iwl4965_setup_rx_handlers(priv); /********************************************* * 8. 
Enable interrupts and read RFKILL state *********************************************/ /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); } iwl_legacy_enable_rfkill_int(priv); /* If platform's RF_KILL switch is NOT set to KILL */ if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) clear_bit(STATUS_RF_KILL_HW, &priv->status); else set_bit(STATUS_RF_KILL_HW, &priv->status); wiphy_rfkill_set_hw_state(priv->hw->wiphy, test_bit(STATUS_RF_KILL_HW, &priv->status)); iwl_legacy_power_initialize(priv); init_completion(&priv->_4965.firmware_loading_complete); err = iwl4965_request_firmware(priv, true); if (err) goto out_destroy_workqueue; return 0; out_destroy_workqueue: destroy_workqueue(priv->workqueue); priv->workqueue = NULL; free_irq(priv->pci_dev->irq, priv); out_disable_msi: pci_disable_msi(priv->pci_dev); iwl4965_uninit_drv(priv); out_free_eeprom: iwl_legacy_eeprom_free(priv); out_iounmap: pci_iounmap(pdev, priv->hw_base); out_pci_release_regions: pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); out_pci_disable_device: pci_disable_device(pdev); out_ieee80211_free_hw: iwl_legacy_free_traffic_mem(priv); ieee80211_free_hw(priv->hw); out: return err; } static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) { struct iwl_priv *priv = pci_get_drvdata(pdev); unsigned long flags; if (!priv) return; wait_for_completion(&priv->_4965.firmware_loading_complete); IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); iwl_legacy_dbgfs_unregister(priv); sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); /* ieee80211_unregister_hw call wil cause iwl_mac_stop to * to be called and iwl4965_down since we are removing the device * we need to set STATUS_EXIT_PENDING bit. 
 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	/* If mac80211 saw this device, let it tear down the interface
	 * (which in turn stops the hardware); otherwise stop it directly. */
	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl4965_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl4965_down(), but there are paths to
	 * run iwl4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_synchronize_irq(priv);

	iwl4965_dealloc_ucode_pci(priv);

	/* Free RX/TX DMA resources and the EEPROM image. */
	if (priv->rxq.bd)
		iwl4965_rx_queue_free(priv, &priv->rxq);
	iwl4965_hw_txq_ctx_free(priv);

	iwl_legacy_eeprom_free(priv);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	/* Release interrupt, MSI and PCI resources in reverse order of
	 * acquisition in the probe path. */
	free_irq(priv->pci_dev->irq, priv);
	pci_disable_msi(priv->pci_dev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl4965_uninit_drv(priv);

	dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 * must be called under priv->lock and mac access
 */
void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
}

/*****************************************************************************
 *
 * driver and module entry point
 *
 *****************************************************************************/

/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
#endif /* CONFIG_IWL4965 */
	{0}
};

MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);

static struct pci_driver iwl4965_driver = {
	.name = DRV_NAME,
	.id_table = iwl4965_hw_card_ids,
	.probe = iwl4965_pci_probe,
	.remove = __devexit_p(iwl4965_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};

/* Module init: register the rate-control algorithm first (the PCI probe
 * path depends on it), then the PCI driver; unwind rate-control
 * registration if PCI registration fails. */
static int __init iwl4965_init(void)
{

	int ret;
	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
	pr_info(DRV_COPYRIGHT "\n");

	ret = iwl4965_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = pci_register_driver(&iwl4965_driver);
	if (ret) {
		pr_err("Unable to initialize PCI module\n");
		goto error_register;
	}

	return ret;

error_register:
	iwl4965_rate_control_unregister();
	return ret;
}

/* Module exit: tear down in reverse order of iwl4965_init(). */
static void __exit iwl4965_exit(void)
{
	pci_unregister_driver(&iwl4965_driver);
	iwl4965_rate_control_unregister();
}

module_exit(iwl4965_exit);
module_init(iwl4965_init);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
gpl-2.0
zf2-laser-dev/android_kernel_asus_msm8939
drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
2364
3007
/******************************************************************************
 * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
 *
 * Based on the r8180 driver, which is:
 * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 *****************************************************************************/
#include "rtl_pci.h"
#include "rtl_core.h"

/*
 * Read the PCIe link-control register into the adapter state and poke two
 * vendor-specific PCI config registers.
 * NOTE(review): offsets 0x98 and 0x70f and the values written (BIT4, 0x17)
 * are undocumented Realtek magic — presumably ASPM-related; confirm against
 * the vendor reference driver before changing.
 */
static void rtl8192_parse_pci_configuration(struct pci_dev *pdev,
					    struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);

	u8 tmp;
	u16 LinkCtrlReg;

	pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg);
	priv->NdisAdapter.LinkCtrlReg = (u8)LinkCtrlReg;

	RT_TRACE(COMP_INIT, "Link Control Register =%x\n",
		 priv->NdisAdapter.LinkCtrlReg);

	pci_read_config_byte(pdev, 0x98, &tmp);
	tmp |= BIT4;
	pci_write_config_byte(pdev, 0x98, tmp);

	tmp = 0x17;
	pci_write_config_byte(pdev, 0x70f, tmp);
}

/*
 * Identify the NIC variant from the PCI device/revision IDs and verify it
 * matches the variant this driver build was compiled for.
 *
 * Returns true when the adapter is recognized and matches priv->ops->nic_type,
 * false otherwise (unknown revision or driver/hardware mismatch).
 */
bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev)
{
	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
	u16 VenderID;
	u16 DeviceID;
	u8 RevisionID;
	u16 IrqLine;

	VenderID = pdev->vendor;
	DeviceID = pdev->device;
	RevisionID = pdev->revision;
	/* 0x3C is the standard PCI Interrupt Line register. */
	pci_read_config_word(pdev, 0x3C, &IrqLine);

	/* Default to the type the driver was built for; refine below. */
	priv->card_8192 = priv->ops->nic_type;

	if (DeviceID == 0x8172) {
		/* 0x8172 is shared between variants; disambiguate by the
		 * hardware revision id. */
		switch (RevisionID) {
		case HAL_HW_PCI_REVISION_ID_8192PCIE:
			printk(KERN_INFO "Adapter(8192 PCI-E) is found - "
			       "DeviceID=%x\n", DeviceID);
			priv->card_8192 = NIC_8192E;
			break;
		case HAL_HW_PCI_REVISION_ID_8192SE:
			printk(KERN_INFO "Adapter(8192SE) is found - "
			       "DeviceID=%x\n", DeviceID);
			priv->card_8192 = NIC_8192SE;
			break;
		default:
			printk(KERN_INFO "UNKNOWN nic type(%4x:%4x)\n",
			       pdev->vendor, pdev->device);
			priv->card_8192 = NIC_UNKNOWN;
			return false;
		}
	}

	if (priv->ops->nic_type != priv->card_8192) {
		printk(KERN_INFO
		       "Detect info(%x) and hardware info(%x) not match!\n",
		       priv->ops->nic_type, priv->card_8192);
		printk(KERN_INFO
		       "Please select proper driver before install!!!!\n");
		return false;
	}

	rtl8192_parse_pci_configuration(pdev, dev);

	return true;
}
gpl-2.0
Flipkart/linux
drivers/gpu/drm/nouveau/nouveau_debugfs.c
4156
2099
/* * Copyright (C) 2009 Red Hat <bskeggs@redhat.com> * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ /* * Authors: * Ben Skeggs <bskeggs@redhat.com> */ #include "nouveau_debugfs.h" #include "nouveau_drm.h" static int nouveau_debugfs_vbios_image(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct nouveau_drm *drm = nouveau_drm(node->minor->dev); int i; for (i = 0; i < drm->vbios.length; i++) seq_printf(m, "%c", drm->vbios.data[i]); return 0; } static struct drm_info_list nouveau_debugfs_list[] = { { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, }; #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) int nouveau_debugfs_init(struct drm_minor *minor) { drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, minor->debugfs_root, minor); return 0; } void nouveau_debugfs_takedown(struct drm_minor *minor) { drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES, minor); }
gpl-2.0
AD5GB/kernel_n5_3.10-experimental
drivers/isdn/hysdn/hysdn_procconf.c
4668
13395
/* $Id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
 *
 * Linux driver for HYSDN cards, /proc/net filesystem dir and conf functions.
 *
 * written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
 *
 * Copyright 1999 by Werner Cornelius (werner@titro.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/cred.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>

#include "hysdn_defs.h"

/* serializes open/close of the per-card conf files */
static DEFINE_MUTEX(hysdn_conf_mutex);

#define INFO_OUT_LEN 80		/* length of info line including lf */

/********************************************************/
/* defines and data structure for conf write operations */
/********************************************************/
#define CONF_STATE_DETECT 0	/* waiting for detect */
#define CONF_STATE_CONF 1	/* writing config data */
#define CONF_STATE_POF 2	/* writing pof data */
#define CONF_LINE_LEN 255	/* 255 chars max */

/* per-open state for a write to a conf file; the first written byte decides
 * whether we receive a POF firmware image or textual config lines. */
struct conf_writedata {
	hysdn_card *card;	/* card the device is connected to */
	int buf_size;		/* actual number of bytes in the buffer */
	int needed_size;	/* needed size when reading pof */
	int state;		/* actual interface states from above constants */
	unsigned char conf_line[CONF_LINE_LEN];	/* buffered conf line */
	unsigned short channel;	/* active channel number */
	unsigned char *pof_buffer;	/* buffer when writing pof */
};

/***********************************************************************/
/* process_line parses one config line and transfers it to the card if */
/* necessary.                                                          */
/* if the return value is negative an error occurred.                  */
/***********************************************************************/
static int
process_line(struct conf_writedata *cnf)
{
	unsigned char *cp = cnf->conf_line;
	int i;

	if (cnf->card->debug_flags & LOG_CNF_LINE)
		hysdn_addlog(cnf->card, "conf line: %s", cp);

	if (*cp == '-') {	/* option */
		cp++;		/* point to option char */

		/* "-c<nnn>" selects the active channel; anything else is
		 * silently ignored. */
		if (*cp++ != 'c')
			return (0);	/* option unknown or used */
		i = 0;		/* start value for channel */
		while ((*cp <= '9') && (*cp >= '0'))
			i = i * 10 + *cp++ - '0';	/* get decimal number */
		if (i > 65535) {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card,
					     "conf channel invalid %d", i);
			return (-ERR_INV_CHAN);	/* invalid channel */
		}
		cnf->channel = i & 0xFFFF;	/* set new channel number */
		return (0);	/* success */
	}			/* option */

	if (*cp == '*') {	/* line to send */
		if (cnf->card->debug_flags & LOG_CNF_DATA)
			hysdn_addlog(cnf->card, "conf chan=%d %s",
				     cnf->channel, cp);
		return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1,
					 cnf->channel));	/* send the line without * */
	}			/* line to send */

	return (0);
}				/* process_line */

/***********************************/
/* conf file operations and tables */
/***********************************/

/****************************************************/
/* write conf file -> boot or send cfg line to card */
/****************************************************/
static ssize_t
hysdn_conf_write(struct file *file, const char __user *buf,
		 size_t count, loff_t *off)
{
	struct conf_writedata *cnf;
	int i;
	unsigned char ch, *cp;

	if (!count)
		return (0);	/* nothing to handle */

	if (!(cnf = file->private_data))
		return (-EFAULT);	/* should never happen */

	/* First byte decides the mode: 0x1A (POF magic) -> firmware
	 * download, anything else -> textual config lines. */
	if (cnf->state == CONF_STATE_DETECT) {	/* auto detect cnf or pof data */
		if (copy_from_user(&ch, buf, 1))	/* get first char for detect */
			return (-EFAULT);

		if (ch == 0x1A) {
			/* we detected a pof file */
			if ((cnf->needed_size = pof_write_open(cnf->card, &cnf->pof_buffer)) <= 0)
				return (cnf->needed_size);	/* an error occurred -> exit */
			cnf->buf_size = 0;	/* buffer is empty */
			cnf->state = CONF_STATE_POF;	/* new state */
		} else {
			/* conf data has been detected */
			cnf->buf_size = 0;	/* buffer is empty */
			cnf->state = CONF_STATE_CONF;	/* requested conf data write */
			if (cnf->card->state != CARD_STATE_RUN)
				return (-ERR_NOT_BOOTED);
			cnf->conf_line[CONF_LINE_LEN - 1] = 0;	/* limit string length */
			cnf->channel = 4098;	/* default channel for output */
		}
	}			/* state was auto detect */

	if (cnf->state == CONF_STATE_POF) {	/* pof write active */
		i = cnf->needed_size - cnf->buf_size;	/* bytes still missing for write */
		if (i <= 0)
			return (-EINVAL);	/* size error handling pof */

		if (i < count)
			count = i;	/* limit requested number of bytes */

		if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count))
			return (-EFAULT);	/* error while copying */

		cnf->buf_size += count;
		/* flush a complete record to the card once enough bytes
		 * have accumulated; the next needed size is returned. */
		if (cnf->needed_size == cnf->buf_size) {
			cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size);	/* write data */
			if (cnf->needed_size <= 0) {
				cnf->card->state = CARD_STATE_BOOTERR;	/* show boot error */
				return (cnf->needed_size);	/* an error occurred */
			}
			cnf->buf_size = 0;	/* buffer is empty again */
		}
	}
	/* pof write active */
	else {			/* conf write active */
		if (cnf->card->state != CARD_STATE_RUN) {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "cnf write denied -> not booted");
			return (-ERR_NOT_BOOTED);
		}
		i = (CONF_LINE_LEN - 1) - cnf->buf_size;	/* bytes available in buffer */
		if (i > 0) {
			/* copy remaining bytes into buffer */

			if (count > i)
				count = i;	/* limit transfer */
			if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count))
				return (-EFAULT);	/* error while copying */

			i = count;	/* number of chars in buffer */
			cp = cnf->conf_line + cnf->buf_size;
			while (i) {
				/* search for end of line */
				if ((*cp < ' ') && (*cp != 9))
					break;	/* end of line found */
				cp++;
				i--;
			}	/* search for end of line */

			if (i) {
				/* delimiter found */
				*cp++ = 0;	/* string termination */
				count -= (i - 1);	/* subtract remaining bytes from count */
				/* swallow the CR/LF run that terminated the
				 * line so the next write starts clean. */
				while ((i) && (*cp < ' ') && (*cp != 9)) {
					i--;	/* discard next char */
					count++;	/* mark as read */
					cp++;	/* next char */
				}
				cnf->buf_size = 0;	/* buffer is empty after transfer */
				if ((i = process_line(cnf)) < 0)	/* handle the line */
					count = i;	/* return the error */
			}	/* delimiter found */
			else {
				cnf->buf_size += count;	/* add chars to string */
				if (cnf->buf_size >= CONF_LINE_LEN - 1) {
					if (cnf->card->debug_flags & LOG_CNF_MISC)
						hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count);
					return (-ERR_CONF_LONG);
				}
			}	/* not delimited */

		}
		/* copy remaining bytes into buffer */
		else {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "cnf line too long");
			return (-ERR_CONF_LONG);
		}
	}			/* conf write active */

	return (count);
}				/* hysdn_conf_write */

/*******************************************/
/* read conf file -> output card info data */
/*******************************************/
static ssize_t
hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
	char *cp;

	if (!(file->f_mode & FMODE_READ))
		return -EPERM;	/* no permission to read */

	/* private_data holds the info string built in hysdn_conf_open() */
	if (!(cp = file->private_data))
		return -EFAULT;	/* should never happen */

	return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
}				/* hysdn_conf_read */

/******************/
/* open conf file */
/******************/
static int
hysdn_conf_open(struct inode *ino, struct file *filep)
{
	hysdn_card *card;
	struct conf_writedata *cnf;
	char *cp, *tmp;

	/* now search the addressed card */
	mutex_lock(&hysdn_conf_mutex);
	card = PDE_DATA(ino);
	if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
		hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
			     filep->f_cred->fsuid, filep->f_cred->fsgid,
			     filep->f_mode);

	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
		/* write only access -> write boot file or conf line */

		if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) {
			mutex_unlock(&hysdn_conf_mutex);
			return (-EFAULT);
		}
		cnf->card = card;
		cnf->buf_size = 0;	/* nothing buffered */
		cnf->state = CONF_STATE_DETECT;	/* start auto detect */
		filep->private_data = cnf;

	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
		/* read access -> output card info data; the whole text is
		 * pre-built here and handed out by hysdn_conf_read(). */

		if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
			mutex_unlock(&hysdn_conf_mutex);
			return (-EFAULT);	/* out of memory */
		}
		filep->private_data = tmp;	/* start of string */

		/* first output a headline */
		sprintf(tmp, "id bus slot type irq iobase dp-mem b-chans fax-chans state device");
		cp = tmp;	/* start of string */
		while (*cp)
			cp++;
		/* pad both lines to exactly INFO_OUT_LEN chars + newline */
		while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
			*cp++ = ' ';
		*cp++ = '\n';

		/* and now the data */
		sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s",
			card->myid,
			card->bus,
			PCI_SLOT(card->devfn),
			card->brdtype,
			card->irq,
			card->iobase,
			card->membase,
			card->bchans,
			card->faxchans,
			card->state,
			hysdn_net_getname(card));
		while (*cp)
			cp++;
		while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
			*cp++ = ' ';
		*cp++ = '\n';
		*cp = 0;	/* end of string */
	} else {		/* simultaneous read/write access forbidden ! */
		mutex_unlock(&hysdn_conf_mutex);
		return (-EPERM);	/* no permission this time */
	}
	mutex_unlock(&hysdn_conf_mutex);
	return nonseekable_open(ino, filep);
}				/* hysdn_conf_open */

/***************************/
/* close a config file.    */
/***************************/
static int
hysdn_conf_close(struct inode *ino, struct file *filep)
{
	hysdn_card *card;
	struct conf_writedata *cnf;
	int retval = 0;

	mutex_lock(&hysdn_conf_mutex);
	card = PDE_DATA(ino);
	if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
		hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
			     filep->f_cred->fsuid, filep->f_cred->fsgid,
			     filep->f_mode);

	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
		/* write only access -> write boot file or conf line */
		if (filep->private_data) {
			cnf = filep->private_data;

			/* finalize an in-flight POF download before freeing */
			if (cnf->state == CONF_STATE_POF)
				retval = pof_write_close(cnf->card);	/* close the pof write */
			kfree(filep->private_data);	/* free allocated memory for buffer */

		}		/* handle write private data */
	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
		/* read access -> output card info data */

		kfree(filep->private_data);	/* release memory */
	}
	mutex_unlock(&hysdn_conf_mutex);
	return (retval);
}				/* hysdn_conf_close */

/******************************************************/
/* table for conf filesystem functions defined above. */
/******************************************************/
static const struct file_operations conf_fops =
{
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= hysdn_conf_read,
	.write		= hysdn_conf_write,
	.open		= hysdn_conf_open,
	.release	= hysdn_conf_close,
};

/*****************************/
/* hysdn subdir in /proc/net */
/*****************************/
struct proc_dir_entry *hysdn_proc_entry = NULL;

/*******************************************************************************/
/* hysdn_procconf_init is called when the module is loaded and after the cards */
/* have been detected. The needed proc dir and card config files are created.  */
/* The log init is called at last.
*/ /*******************************************************************************/ int hysdn_procconf_init(void) { hysdn_card *card; unsigned char conf_name[20]; hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net); if (!hysdn_proc_entry) { printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n"); return (-1); } card = card_root; /* point to first card */ while (card) { sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid); if ((card->procconf = (void *) proc_create_data(conf_name, S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry, &conf_fops, card)) != NULL) { hysdn_proclog_init(card); /* init the log file entry */ } card = card->next; /* next entry */ } printk(KERN_NOTICE "HYSDN: procfs initialised\n"); return (0); } /* hysdn_procconf_init */ /*************************************************************************************/ /* hysdn_procconf_release is called when the module is unloaded and before the cards */ /* resources are released. The module counter is assumed to be 0 ! */ /*************************************************************************************/ void hysdn_procconf_release(void) { hysdn_card *card; unsigned char conf_name[20]; card = card_root; /* start with first card */ while (card) { sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid); if (card->procconf) remove_proc_entry(conf_name, hysdn_proc_entry); hysdn_proclog_release(card); /* init the log file entry */ card = card->next; /* point to next card */ } remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net); }
gpl-2.0
davidmueller13/lt03lte_tw_kernel_5.1.1
drivers/net/wireless/ath/ath5k/desc.c
4924
22618
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/******************************\
 Hardware Descriptor Functions
\******************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"

/**
 * DOC: Hardware descriptor functions
 *
 * Here we handle the processing of the low-level hw descriptors
 * that hw reads and writes via DMA for each TX and RX attempt (that means
 * we can also have descriptors for failed TX/RX tries). We have two kind of
 * descriptors for RX and TX, control descriptors tell the hw how to send or
 * receive a packet where to read/write it from/to etc and status descriptors
 * that contain information about how the packet was sent or received (errors
 * included).
 *
 * Descriptor format is not exactly the same for each MAC chip version so we
 * have function pointers on &struct ath5k_hw we initialize at runtime based on
 * the chip used.
 */

/************************\
* TX Control descriptors *
\************************/

/**
 * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @pkt_len: Frame length in bytes
 * @hdr_len: Header length in bytes (only used on AR5210)
 * @padsize: Any padding we've added to the frame length
 * @type: One of enum ath5k_pkt_type
 * @tx_power: Tx power in 0.5dB steps
 * @tx_rate0: HW idx for transmission rate
 * @tx_tries0: Max number of retransmissions
 * @key_index: Index on key table to use for encryption
 * @antenna_mode: Which antenna to use (0 for auto)
 * @flags: One of AR5K_TXDESC_* flags (desc.h)
 * @rtscts_rate: HW idx for RTS/CTS transmission rate
 * @rtscts_duration: What to put on duration field on the header of RTS/CTS
 *
 * Internal function to initialize a 2-Word TX control descriptor
 * found on AR5210 and AR5211 MACs chips.
 *
 * Returns 0 on success or -EINVAL on false input
 */
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			unsigned int pkt_len, unsigned int hdr_len,
			int padsize,
			enum ath5k_pkt_type type,
			unsigned int tx_power,
			unsigned int tx_rate0, unsigned int tx_tries0,
			unsigned int key_index,
			unsigned int antenna_mode,
			unsigned int flags,
			unsigned int rtscts_rate, unsigned int rtscts_duration)
{
	u32 frame_type;
	struct ath5k_hw_2w_tx_ctl *tx_ctl;
	unsigned int frame_len;

	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;

	/*
	 * Validate input
	 * - Zero retries don't make sense.
	 * - A zero rate will put the HW into a mode where it continuously sends
	 *   noise on the channel, so it is important to avoid this.
	 */
	if (unlikely(tx_tries0 == 0)) {
		ATH5K_ERR(ah, "zero retries\n");
		WARN_ON(1);
		return -EINVAL;
	}

	if (unlikely(tx_rate0 == 0)) {
		ATH5K_ERR(ah, "zero rate\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Clear descriptor */
	memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));

	/* Setup control descriptor */

	/* Verify and set frame length */

	/* remove padding we might have added before */
	frame_len = pkt_len - padsize + FCS_LEN;

	if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
		return -EINVAL;

	tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;

	/* Verify and set buffer length */

	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	if (type == AR5K_PKT_TYPE_BEACON)
		pkt_len = roundup(pkt_len, 4);

	if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
		return -EINVAL;

	tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;

	/*
	 * Verify and set header length (only 5210)
	 */
	if (ah->ah_version == AR5K_AR5210) {
		if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
			return -EINVAL;
		tx_ctl->tx_control_0 |=
			AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
	}

	/*Differences between 5210-5211*/
	if (ah->ah_version == AR5K_AR5210) {
		switch (type) {
		case AR5K_PKT_TYPE_BEACON:
		case AR5K_PKT_TYPE_PROBE_RESP:
			frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
			break;
		case AR5K_PKT_TYPE_PIFS:
			frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
			break;
		default:
			frame_type = type;
			break;
		}

		tx_ctl->tx_control_0 |=
			AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
			AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);

	} else {
		tx_ctl->tx_control_0 |=
			AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
			AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
		tx_ctl->tx_control_1 |=
			AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
	}

	/* Helper macros: set flag bit _flag in tx_control_<_c> when the
	 * corresponding AR5K_TXDESC_* bit is requested by the caller. */
#define _TX_FLAGS(_c, _flag)					\
	if (flags & AR5K_TXDESC_##_flag) {			\
		tx_ctl->tx_control_##_c |=			\
			AR5K_2W_TX_DESC_CTL##_c##_##_flag;	\
	}
#define _TX_FLAGS_5211(_c, _flag)					\
	if (flags & AR5K_TXDESC_##_flag) {				\
		tx_ctl->tx_control_##_c |=				\
			AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211;	\
	}

	_TX_FLAGS(0, CLRDMASK);
	_TX_FLAGS(0, INTREQ);
	_TX_FLAGS(0, RTSENA);

	if (ah->ah_version == AR5K_AR5211) {
		_TX_FLAGS_5211(0, VEOL);
		_TX_FLAGS_5211(1, NOACK);
	}

#undef _TX_FLAGS
#undef _TX_FLAGS_5211

	/*
	 * WEP crap
	 */
	if (key_index != AR5K_TXKEYIX_INVALID) {
		tx_ctl->tx_control_0 |= AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
		tx_ctl->tx_control_1 |=
			AR5K_REG_SM(key_index, AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
	}

	/*
	 * RTS/CTS Duration [5210 ?]
	 */
	if ((ah->ah_version == AR5K_AR5210) &&
	    (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
		tx_ctl->tx_control_1 |= rtscts_duration &
			AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;

	return 0;
}

/**
 * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @pkt_len: Frame length in bytes
 * @hdr_len: Header length in bytes (only used on AR5210)
 * @padsize: Any padding we've added to the frame length
 * @type: One of enum ath5k_pkt_type
 * @tx_power: Tx power in 0.5dB steps
 * @tx_rate0: HW idx for transmission rate
 * @tx_tries0: Max number of retransmissions
 * @key_index: Index on key table to use for encryption
 * @antenna_mode: Which antenna to use (0 for auto)
 * @flags: One of AR5K_TXDESC_* flags (desc.h)
 * @rtscts_rate: HW idx for RTS/CTS transmission rate
 * @rtscts_duration: What to put on duration field on the header of RTS/CTS
 *
 * Internal function to initialize a 4-Word TX control descriptor
 * found on AR5212 and later MACs chips.
 *
 * Returns 0 on success or -EINVAL on false input
 */
static int
ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			unsigned int pkt_len, unsigned int hdr_len,
			int padsize,
			enum ath5k_pkt_type type,
			unsigned int tx_power,
			unsigned int tx_rate0, unsigned int tx_tries0,
			unsigned int key_index,
			unsigned int antenna_mode,
			unsigned int flags,
			unsigned int rtscts_rate, unsigned int rtscts_duration)
{
	struct ath5k_hw_4w_tx_ctl *tx_ctl;
	unsigned int frame_len;

	/*
	 * Use local variables for these to reduce load/store access on
	 * uncached memory
	 */
	u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0;

	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;

	/*
	 * Validate input
	 * - Zero retries don't make sense.
	 * - A zero rate will put the HW into a mode where it continuously sends
	 *   noise on the channel, so it is important to avoid this.
	 */
	if (unlikely(tx_tries0 == 0)) {
		ATH5K_ERR(ah, "zero retries\n");
		WARN_ON(1);
		return -EINVAL;
	}

	if (unlikely(tx_rate0 == 0)) {
		ATH5K_ERR(ah, "zero rate\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Clamp tx power to the hardware maximum after applying the
	 * per-channel offset. */
	tx_power += ah->ah_txpower.txp_offset;
	if (tx_power > AR5K_TUNE_MAX_TXPOWER)
		tx_power = AR5K_TUNE_MAX_TXPOWER;

	/* Clear descriptor status area */
	memset(&desc->ud.ds_tx5212.tx_stat, 0,
	       sizeof(desc->ud.ds_tx5212.tx_stat));

	/* Setup control descriptor */

	/* Verify and set frame length */

	/* remove padding we might have added before */
	frame_len = pkt_len - padsize + FCS_LEN;

	if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
		return -EINVAL;

	txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;

	/* Verify and set buffer length */

	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	if (type == AR5K_PKT_TYPE_BEACON)
		pkt_len = roundup(pkt_len, 4);

	if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
		return -EINVAL;

	txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;

	txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
		  AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
	txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
	txctl2 = AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
	txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;

	/* Map requested AR5K_TXDESC_* flags onto the control words. */
#define _TX_FLAGS(_c, _flag)					\
	if (flags & AR5K_TXDESC_##_flag) {			\
		txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag;	\
	}

	_TX_FLAGS(0, CLRDMASK);
	_TX_FLAGS(0, VEOL);
	_TX_FLAGS(0, INTREQ);
	_TX_FLAGS(0, RTSENA);
	_TX_FLAGS(0, CTSENA);
	_TX_FLAGS(1, NOACK);

#undef _TX_FLAGS

	/*
	 * WEP crap
	 */
	if (key_index != AR5K_TXKEYIX_INVALID) {
		txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
		txctl1 |= AR5K_REG_SM(key_index,
				AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
	}

	/*
	 * RTS/CTS
	 */
	if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
		/* RTS and CTS protection are mutually exclusive */
		if ((flags & AR5K_TXDESC_RTSENA) &&
		    (flags & AR5K_TXDESC_CTSENA))
			return -EINVAL;
		txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
		txctl3 |= AR5K_REG_SM(rtscts_rate,
				AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
	}

	tx_ctl->tx_control_0 = txctl0;
	tx_ctl->tx_control_1 = txctl1;
	tx_ctl->tx_control_2 = txctl2;
	tx_ctl->tx_control_3 = txctl3;

	return 0;
}

/**
 * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @tx_rate1: HW idx for rate used on transmission series 1
 * @tx_tries1: Max number of retransmissions for transmission series 1
 * @tx_rate2: HW idx for rate used on transmission series 2
 * @tx_tries2: Max number of retransmissions for transmission series 2
 * @tx_rate3: HW idx for rate used on transmission series 3
 * @tx_tries3: Max number of retransmissions for transmission series 3
 *
 * Multi rate retry (MRR) tx control descriptors are available only on AR5212
 * MACs, they are part of the normal 4-word tx control descriptor (see above)
 * but we handle them through a separate function for better abstraction.
 *
 * Returns 0 on success or -EINVAL on invalid input
 */
int
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			u_int tx_rate1, u_int tx_tries1,
			u_int tx_rate2, u_int tx_tries2,
			u_int tx_rate3, u_int tx_tries3)
{
	struct ath5k_hw_4w_tx_ctl *tx_ctl;

	/* no mrr support for cards older than 5212 */
	if (ah->ah_version < AR5K_AR5212)
		return 0;

	/*
	 * Rates can be 0 as long as the retry count is 0 too.
	 * A zero rate and nonzero retry count will put the HW into a mode where
	 * it continuously sends noise on the channel, so it is important to
	 * avoid this.
	 */
	if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
		     (tx_rate2 == 0 && tx_tries2 != 0) ||
		     (tx_rate3 == 0 && tx_tries3 != 0))) {
		ATH5K_ERR(ah, "zero rate\n");
		WARN_ON(1);
		return -EINVAL;
	}

	if (ah->ah_version == AR5K_AR5212) {
		tx_ctl = &desc->ud.ds_tx5212.tx_ctl;

		/* For each non-zero retry series, program its try count
		 * into control word 2 and its rate into control word 3. */
#define _XTX_TRIES(_n)							\
	if (tx_tries##_n) {						\
		tx_ctl->tx_control_2 |=					\
			AR5K_REG_SM(tx_tries##_n,			\
			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n);		\
		tx_ctl->tx_control_3 |=					\
			AR5K_REG_SM(tx_rate##_n,			\
			AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n);		\
	}

		_XTX_TRIES(1);
		_XTX_TRIES(2);
		_XTX_TRIES(3);

#undef _XTX_TRIES

		return 1;
	}

	return 0;
}

/***********************\
* TX Status descriptors *
\***********************/

/**
 * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @ts: The &struct ath5k_tx_status
 */
static int
ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			struct ath5k_tx_status *ts)
{
	struct ath5k_hw_2w_tx_ctl *tx_ctl;
	struct ath5k_hw_tx_status *tx_status;

	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
	tx_status = &desc->ud.ds_tx5210.tx_stat;

	/* No frame has been sent or error */
	if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
		return -EINPROGRESS;

	/*
	 * Get descriptor status
	 */
	ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
	ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
	ts->ts_final_retry = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
	/*TODO: ts->ts_virtcol + test*/
	ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
		AR5K_DESC_TX_STATUS1_SEQ_NUM);
	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
	ts->ts_antenna = 1;
	ts->ts_status = 0;
	ts->ts_final_idx = 0;

	/* Translate hardware error bits to AR5K_TXERR_* status flags. */
	if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
		if (tx_status->tx_status_0 &
				AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
			ts->ts_status |= AR5K_TXERR_XRETRY;

		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
			ts->ts_status |= AR5K_TXERR_FIFO;

		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
			ts->ts_status |= AR5K_TXERR_FILT;
	}

	return 0;
}

/**
 * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @ts: The &struct ath5k_tx_status
 */
static int
ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			struct ath5k_tx_status *ts)
{
	struct ath5k_hw_4w_tx_ctl *tx_ctl;
	struct ath5k_hw_tx_status *tx_status;
	u32 txstat0, txstat1;

	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
	tx_status = &desc->ud.ds_tx5212.tx_stat;

	/* ACCESS_ONCE: the descriptor lives in DMA-coherent memory the
	 * hardware may still be updating; force a single load of each word. */
	txstat1 = ACCESS_ONCE(tx_status->tx_status_1);

	/* No frame has been sent or error */
	if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
		return -EINPROGRESS;

	txstat0 = ACCESS_ONCE(tx_status->tx_status_0);

	/*
	 * Get descriptor status
	 */
	ts->ts_tstamp = AR5K_REG_MS(txstat0,
		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
	ts->ts_shortretry = AR5K_REG_MS(txstat0,
		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
	ts->ts_final_retry = AR5K_REG_MS(txstat0,
		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
	ts->ts_seqnum = AR5K_REG_MS(txstat1,
		AR5K_DESC_TX_STATUS1_SEQ_NUM);
	ts->ts_rssi = AR5K_REG_MS(txstat1,
		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
	ts->ts_antenna = (txstat1 &
		AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
	ts->ts_status = 0;

	ts->ts_final_idx = AR5K_REG_MS(txstat1,
			AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);

	/* TX error */
	if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
		if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
			ts->ts_status |= AR5K_TXERR_XRETRY;

		if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
			ts->ts_status |= AR5K_TXERR_FIFO;

		if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED)
			ts->ts_status |= AR5K_TXERR_FILT;
	}

	return 0;
}

/****************\
* RX Descriptors *
\****************/

/**
 * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @size: RX buffer length in bytes
 * @flags: One of AR5K_RXDESC_* flags
 */
int
ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			u32 size, unsigned int flags)
{
	struct ath5k_hw_rx_ctl *rx_ctl;

	rx_ctl = &desc->ud.ds_rx.rx_ctl;

	/*
	 * Clear the descriptor
	 * If we don't clean the status descriptor,
	 * while scanning we get too many results,
	 * most of them virtual, after some secs
	 * of scanning system hangs. M.F.
	 */
	memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));

	if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
		return -EINVAL;

	/* Setup descriptor */
	rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;

	if (flags & AR5K_RXDESC_INTREQ)
		rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;

	return 0;
}

/**
 * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @rs: The &struct ath5k_rx_status
 *
 * Internal function used to process an RX status descriptor
 * on AR5210/5211 MAC.
 *
 * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
 * frame yet.
 */
static int
ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			struct ath5k_rx_status *rs)
{
	struct ath5k_hw_rx_status *rx_status;

	rx_status = &desc->ud.ds_rx.rx_stat;

	/* No frame received / not ready */
	if (unlikely(!(rx_status->rx_status_1 &
			AR5K_5210_RX_DESC_STATUS1_DONE)))
		return -EINPROGRESS;

	/* Start from a clean status so untouched fields read as zero */
	memset(rs, 0, sizeof(struct ath5k_rx_status));

	/*
	 * Frame receive status
	 */
	rs->rs_datalen = rx_status->rx_status_0 &
		AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
	rs->rs_more = !!(rx_status->rx_status_0 &
		AR5K_5210_RX_DESC_STATUS0_MORE);
	/* TODO: this timestamp is 13 bit, later on we assume 15 bit!
	 * also the HAL code for 5210 says the timestamp is bits [10..22] of the
	 * TSF, and extends the timestamp here to 15 bit.
	 * we need to check on 5210...
	 */
	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
		AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);

	/* 5211 reports the antenna in a multi-bit field; 5210 only has a
	 * single flag bit that maps to antenna 2 or 1 */
	if (ah->ah_version == AR5K_AR5211)
		rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
				AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
	else
		rs->rs_antenna = (rx_status->rx_status_0 &
				AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
				? 2 : 1;

	/*
	 * Key table status
	 */
	if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
			AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
	else
		rs->rs_keyix = AR5K_RXKEYIX_INVALID;

	/*
	 * Receive/descriptor errors
	 */
	if (!(rx_status->rx_status_1 &
			AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
		if (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_CRC;

		/* only on 5210 */
		if ((ah->ah_version == AR5K_AR5210) &&
		    (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
			rs->rs_status |= AR5K_RXERR_FIFO;

		if (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
			rs->rs_status |= AR5K_RXERR_PHY;
			rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
		}

		if (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_DECRYPT;
	}

	return 0;
}

/**
 * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
 * @ah: The &struct ath5k_hw
 * @desc: The &struct ath5k_desc
 * @rs: The &struct ath5k_rx_status
 *
 * Internal function used to process an RX status descriptor
 * on AR5212 and later MAC.
 *
 * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
 * frame yet.
 */
static int
ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
			struct ath5k_desc *desc,
			struct ath5k_rx_status *rs)
{
	struct ath5k_hw_rx_status *rx_status;
	u32 rxstat0, rxstat1;

	rx_status = &desc->ud.ds_rx.rx_stat;

	/* NOTE(review): ACCESS_ONCE presumably keeps the compiler from
	 * re-reading the DMA descriptor after the DONE check — confirm
	 * against memory-barriers.txt before changing. */
	rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);

	/* No frame received / not ready */
	if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
		return -EINPROGRESS;

	/* Start from a clean status so untouched fields read as zero */
	memset(rs, 0, sizeof(struct ath5k_rx_status));

	rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);

	/*
	 * Frame receive status
	 */
	rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
	rs->rs_rssi = AR5K_REG_MS(rxstat0,
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
	rs->rs_rate = AR5K_REG_MS(rxstat0,
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
	rs->rs_antenna = AR5K_REG_MS(rxstat0,
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
	rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE);
	rs->rs_tstamp = AR5K_REG_MS(rxstat1,
		AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);

	/*
	 * Key table status
	 */
	if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
		rs->rs_keyix = AR5K_REG_MS(rxstat1,
			AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
	else
		rs->rs_keyix = AR5K_RXKEYIX_INVALID;

	/*
	 * Receive/descriptor errors
	 */
	if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
		if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_CRC;

		if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
			rs->rs_status |= AR5K_RXERR_PHY;
			rs->rs_phyerr = AR5K_REG_MS(rxstat1,
				AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
			/* Report PHY errors to ANI only when the hardware
			 * has no dedicated PHY-error counters */
			if (!ah->ah_capabilities.cap_has_phyerr_counters)
				ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
		}

		if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_DECRYPT;

		if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
			rs->rs_status |= AR5K_RXERR_MIC;
	}

	return 0;
}


/********\
* Attach *
\********/

/**
 * ath5k_hw_init_desc_functions() - Init function pointers inside ah
 * @ah: The &struct ath5k_hw
 *
 * Maps the internal descriptor functions to the function pointers on ah, used
 * from above. This is used as an abstraction layer to handle the various chips
 * the same way.
 *
 * Returns 0 on success or -ENOTSUPP for an unknown MAC version.
 */
int
ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
	/* 5212 uses 4-word tx descriptors, 5210/5211 use 2-word ones */
	if (ah->ah_version == AR5K_AR5212) {
		ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
		ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
		ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
	} else if (ah->ah_version <= AR5K_AR5211) {
		ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
		ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
		ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
	} else
		return -ENOTSUPP;

	return 0;
}
gpl-2.0
cnexus/kernel_412_jewel_d2
drivers/net/wireless/ath/ath5k/initvals.c
4924
51537
/*
 * Initial register settings functions
 *
 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include "ath5k.h"
#include "reg.h"
#include "debug.h"

/**
 * struct ath5k_ini - Mode-independent initial register writes
 * @ini_register: Register address
 * @ini_value: Default value
 * @ini_mode: 0 to write 1 to read (and clear)
 */
struct ath5k_ini {
	u16	ini_register;
	u32	ini_value;

	enum {
		AR5K_INI_WRITE = 0,	/* Default */
		AR5K_INI_READ = 1,	/* Read (and clear) this register */
	} ini_mode;
};

/**
 * struct ath5k_ini_mode - Mode specific initial register values
 * @mode_register: Register address
 * @mode_value: Set of values for each enum ath5k_driver_mode
 */
struct ath5k_ini_mode {
	u16	mode_register;
	u32	mode_value[3];
};

/* Initial register settings for AR5210 */
static const struct ath5k_ini ar5210_ini[] = {
	/* PCU and MAC registers */
	{ AR5K_NOQCU_TXDP0, 0 },
	{ AR5K_NOQCU_TXDP1, 0 },
	{ AR5K_RXDP, 0 },
	{ AR5K_CR, 0 },
	{ AR5K_ISR, 0, AR5K_INI_READ },
	{ AR5K_IMR, 0 },
	{ AR5K_IER, AR5K_IER_DISABLE },
	{ AR5K_BSR, 0, AR5K_INI_READ },
	{ AR5K_TXCFG, AR5K_DMASIZE_128B },
	{ AR5K_RXCFG, AR5K_DMASIZE_128B },
	{ AR5K_CFG, AR5K_INIT_CFG },
	{ AR5K_TOPS, 8 },
	{ AR5K_RXNOFRM, 8 },
	{
AR5K_RPGTO, 0 }, { AR5K_TXNOFRM, 0 }, { AR5K_SFR, 0 }, { AR5K_MIBC, 0 }, { AR5K_MISC, 0 }, { AR5K_RX_FILTER_5210, 0 }, { AR5K_MCAST_FILTER0_5210, 0 }, { AR5K_MCAST_FILTER1_5210, 0 }, { AR5K_TX_MASK0, 0 }, { AR5K_TX_MASK1, 0 }, { AR5K_CLR_TMASK, 0 }, { AR5K_TRIG_LVL, AR5K_TUNE_MIN_TX_FIFO_THRES }, { AR5K_DIAG_SW_5210, 0 }, { AR5K_RSSI_THR, AR5K_TUNE_RSSI_THRES }, { AR5K_TSF_L32_5210, 0 }, { AR5K_TIMER0_5210, 0 }, { AR5K_TIMER1_5210, 0xffffffff }, { AR5K_TIMER2_5210, 0xffffffff }, { AR5K_TIMER3_5210, 1 }, { AR5K_CFP_DUR_5210, 0 }, { AR5K_CFP_PERIOD_5210, 0 }, /* PHY registers */ { AR5K_PHY(0), 0x00000047 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY(3), 0x09848ea6 }, { AR5K_PHY(4), 0x3d32e000 }, { AR5K_PHY(5), 0x0000076b }, { AR5K_PHY_ACT, AR5K_PHY_ACT_DISABLE }, { AR5K_PHY(8), 0x02020200 }, { AR5K_PHY(9), 0x00000e0e }, { AR5K_PHY(10), 0x0a020201 }, { AR5K_PHY(11), 0x00036ffc }, { AR5K_PHY(12), 0x00000000 }, { AR5K_PHY(13), 0x00000e0e }, { AR5K_PHY(14), 0x00000007 }, { AR5K_PHY(15), 0x00020100 }, { AR5K_PHY(16), 0x89630000 }, { AR5K_PHY(17), 0x1372169c }, { AR5K_PHY(18), 0x0018b633 }, { AR5K_PHY(19), 0x1284613c }, { AR5K_PHY(20), 0x0de8b8e0 }, { AR5K_PHY(21), 0x00074859 }, { AR5K_PHY(22), 0x7e80beba }, { AR5K_PHY(23), 0x313a665e }, { AR5K_PHY_AGCCTL, 0x00001d08 }, { AR5K_PHY(25), 0x0001ce00 }, { AR5K_PHY(26), 0x409a4190 }, { AR5K_PHY(28), 0x0000000f }, { AR5K_PHY(29), 0x00000080 }, { AR5K_PHY(30), 0x00000004 }, { AR5K_PHY(31), 0x00000018 }, /* 0x987c */ { AR5K_PHY(64), 0x00000000 }, /* 0x9900 */ { AR5K_PHY(65), 0x00000000 }, { AR5K_PHY(66), 0x00000000 }, { AR5K_PHY(67), 0x00800000 }, { AR5K_PHY(68), 0x00000003 }, /* BB gain table (64bytes) */ { AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000020 }, { AR5K_BB_GAIN(2), 0x00000010 }, { AR5K_BB_GAIN(3), 0x00000030 }, { AR5K_BB_GAIN(4), 0x00000008 }, { AR5K_BB_GAIN(5), 0x00000028 }, { AR5K_BB_GAIN(6), 0x00000028 }, { AR5K_BB_GAIN(7), 0x00000004 }, { AR5K_BB_GAIN(8), 0x00000024 }, { AR5K_BB_GAIN(9), 0x00000014 }, { 
AR5K_BB_GAIN(10), 0x00000034 }, { AR5K_BB_GAIN(11), 0x0000000c }, { AR5K_BB_GAIN(12), 0x0000002c }, { AR5K_BB_GAIN(13), 0x00000002 }, { AR5K_BB_GAIN(14), 0x00000022 }, { AR5K_BB_GAIN(15), 0x00000012 }, { AR5K_BB_GAIN(16), 0x00000032 }, { AR5K_BB_GAIN(17), 0x0000000a }, { AR5K_BB_GAIN(18), 0x0000002a }, { AR5K_BB_GAIN(19), 0x00000001 }, { AR5K_BB_GAIN(20), 0x00000021 }, { AR5K_BB_GAIN(21), 0x00000011 }, { AR5K_BB_GAIN(22), 0x00000031 }, { AR5K_BB_GAIN(23), 0x00000009 }, { AR5K_BB_GAIN(24), 0x00000029 }, { AR5K_BB_GAIN(25), 0x00000005 }, { AR5K_BB_GAIN(26), 0x00000025 }, { AR5K_BB_GAIN(27), 0x00000015 }, { AR5K_BB_GAIN(28), 0x00000035 }, { AR5K_BB_GAIN(29), 0x0000000d }, { AR5K_BB_GAIN(30), 0x0000002d }, { AR5K_BB_GAIN(31), 0x00000003 }, { AR5K_BB_GAIN(32), 0x00000023 }, { AR5K_BB_GAIN(33), 0x00000013 }, { AR5K_BB_GAIN(34), 0x00000033 }, { AR5K_BB_GAIN(35), 0x0000000b }, { AR5K_BB_GAIN(36), 0x0000002b }, { AR5K_BB_GAIN(37), 0x00000007 }, { AR5K_BB_GAIN(38), 0x00000027 }, { AR5K_BB_GAIN(39), 0x00000017 }, { AR5K_BB_GAIN(40), 0x00000037 }, { AR5K_BB_GAIN(41), 0x0000000f }, { AR5K_BB_GAIN(42), 0x0000002f }, { AR5K_BB_GAIN(43), 0x0000002f }, { AR5K_BB_GAIN(44), 0x0000002f }, { AR5K_BB_GAIN(45), 0x0000002f }, { AR5K_BB_GAIN(46), 0x0000002f }, { AR5K_BB_GAIN(47), 0x0000002f }, { AR5K_BB_GAIN(48), 0x0000002f }, { AR5K_BB_GAIN(49), 0x0000002f }, { AR5K_BB_GAIN(50), 0x0000002f }, { AR5K_BB_GAIN(51), 0x0000002f }, { AR5K_BB_GAIN(52), 0x0000002f }, { AR5K_BB_GAIN(53), 0x0000002f }, { AR5K_BB_GAIN(54), 0x0000002f }, { AR5K_BB_GAIN(55), 0x0000002f }, { AR5K_BB_GAIN(56), 0x0000002f }, { AR5K_BB_GAIN(57), 0x0000002f }, { AR5K_BB_GAIN(58), 0x0000002f }, { AR5K_BB_GAIN(59), 0x0000002f }, { AR5K_BB_GAIN(60), 0x0000002f }, { AR5K_BB_GAIN(61), 0x0000002f }, { AR5K_BB_GAIN(62), 0x0000002f }, { AR5K_BB_GAIN(63), 0x0000002f }, /* 5110 RF gain table (64btes) */ { AR5K_RF_GAIN(0), 0x0000001d }, { AR5K_RF_GAIN(1), 0x0000005d }, { AR5K_RF_GAIN(2), 0x0000009d }, { AR5K_RF_GAIN(3), 0x000000dd }, 
{ AR5K_RF_GAIN(4), 0x0000011d }, { AR5K_RF_GAIN(5), 0x00000021 }, { AR5K_RF_GAIN(6), 0x00000061 }, { AR5K_RF_GAIN(7), 0x000000a1 }, { AR5K_RF_GAIN(8), 0x000000e1 }, { AR5K_RF_GAIN(9), 0x00000031 }, { AR5K_RF_GAIN(10), 0x00000071 }, { AR5K_RF_GAIN(11), 0x000000b1 }, { AR5K_RF_GAIN(12), 0x0000001c }, { AR5K_RF_GAIN(13), 0x0000005c }, { AR5K_RF_GAIN(14), 0x00000029 }, { AR5K_RF_GAIN(15), 0x00000069 }, { AR5K_RF_GAIN(16), 0x000000a9 }, { AR5K_RF_GAIN(17), 0x00000020 }, { AR5K_RF_GAIN(18), 0x00000019 }, { AR5K_RF_GAIN(19), 0x00000059 }, { AR5K_RF_GAIN(20), 0x00000099 }, { AR5K_RF_GAIN(21), 0x00000030 }, { AR5K_RF_GAIN(22), 0x00000005 }, { AR5K_RF_GAIN(23), 0x00000025 }, { AR5K_RF_GAIN(24), 0x00000065 }, { AR5K_RF_GAIN(25), 0x000000a5 }, { AR5K_RF_GAIN(26), 0x00000028 }, { AR5K_RF_GAIN(27), 0x00000068 }, { AR5K_RF_GAIN(28), 0x0000001f }, { AR5K_RF_GAIN(29), 0x0000001e }, { AR5K_RF_GAIN(30), 0x00000018 }, { AR5K_RF_GAIN(31), 0x00000058 }, { AR5K_RF_GAIN(32), 0x00000098 }, { AR5K_RF_GAIN(33), 0x00000003 }, { AR5K_RF_GAIN(34), 0x00000004 }, { AR5K_RF_GAIN(35), 0x00000044 }, { AR5K_RF_GAIN(36), 0x00000084 }, { AR5K_RF_GAIN(37), 0x00000013 }, { AR5K_RF_GAIN(38), 0x00000012 }, { AR5K_RF_GAIN(39), 0x00000052 }, { AR5K_RF_GAIN(40), 0x00000092 }, { AR5K_RF_GAIN(41), 0x000000d2 }, { AR5K_RF_GAIN(42), 0x0000002b }, { AR5K_RF_GAIN(43), 0x0000002a }, { AR5K_RF_GAIN(44), 0x0000006a }, { AR5K_RF_GAIN(45), 0x000000aa }, { AR5K_RF_GAIN(46), 0x0000001b }, { AR5K_RF_GAIN(47), 0x0000001a }, { AR5K_RF_GAIN(48), 0x0000005a }, { AR5K_RF_GAIN(49), 0x0000009a }, { AR5K_RF_GAIN(50), 0x000000da }, { AR5K_RF_GAIN(51), 0x00000006 }, { AR5K_RF_GAIN(52), 0x00000006 }, { AR5K_RF_GAIN(53), 0x00000006 }, { AR5K_RF_GAIN(54), 0x00000006 }, { AR5K_RF_GAIN(55), 0x00000006 }, { AR5K_RF_GAIN(56), 0x00000006 }, { AR5K_RF_GAIN(57), 0x00000006 }, { AR5K_RF_GAIN(58), 0x00000006 }, { AR5K_RF_GAIN(59), 0x00000006 }, { AR5K_RF_GAIN(60), 0x00000006 }, { AR5K_RF_GAIN(61), 0x00000006 }, { AR5K_RF_GAIN(62), 0x00000006 }, 
{ AR5K_RF_GAIN(63), 0x00000006 }, /* PHY activation */ { AR5K_PHY(53), 0x00000020 }, { AR5K_PHY(51), 0x00000004 }, { AR5K_PHY(50), 0x00060106 }, { AR5K_PHY(39), 0x0000006d }, { AR5K_PHY(48), 0x00000000 }, { AR5K_PHY(52), 0x00000014 }, { AR5K_PHY_ACT, AR5K_PHY_ACT_ENABLE }, }; /* Initial register settings for AR5211 */ static const struct ath5k_ini ar5211_ini[] = { { AR5K_RXDP, 0x00000000 }, { AR5K_RTSD0, 0x84849c9c }, { AR5K_RTSD1, 0x7c7c7c7c }, { AR5K_RXCFG, 0x00000005 }, { AR5K_MIBC, 0x00000000 }, { AR5K_TOPS, 0x00000008 }, { AR5K_RXNOFRM, 0x00000008 }, { AR5K_TXNOFRM, 0x00000010 }, { AR5K_RPGTO, 0x00000000 }, { AR5K_RFCNT, 0x0000001f }, { AR5K_QUEUE_TXDP(0), 0x00000000 }, { AR5K_QUEUE_TXDP(1), 0x00000000 }, { AR5K_QUEUE_TXDP(2), 0x00000000 }, { AR5K_QUEUE_TXDP(3), 0x00000000 }, { AR5K_QUEUE_TXDP(4), 0x00000000 }, { AR5K_QUEUE_TXDP(5), 0x00000000 }, { AR5K_QUEUE_TXDP(6), 0x00000000 }, { AR5K_QUEUE_TXDP(7), 0x00000000 }, { AR5K_QUEUE_TXDP(8), 0x00000000 }, { AR5K_QUEUE_TXDP(9), 0x00000000 }, { AR5K_DCU_FP, 0x00000000 }, { AR5K_STA_ID1, 0x00000000 }, { AR5K_BSS_ID0, 0x00000000 }, { AR5K_BSS_ID1, 0x00000000 }, { AR5K_RSSI_THR, 0x00000000 }, { AR5K_CFP_PERIOD_5211, 0x00000000 }, { AR5K_TIMER0_5211, 0x00000030 }, { AR5K_TIMER1_5211, 0x0007ffff }, { AR5K_TIMER2_5211, 0x01ffffff }, { AR5K_TIMER3_5211, 0x00000031 }, { AR5K_CFP_DUR_5211, 0x00000000 }, { AR5K_RX_FILTER_5211, 0x00000000 }, { AR5K_MCAST_FILTER0_5211, 0x00000000 }, { AR5K_MCAST_FILTER1_5211, 0x00000002 }, { AR5K_DIAG_SW_5211, 0x00000000 }, { AR5K_ADDAC_TEST, 0x00000000 }, { AR5K_DEFAULT_ANTENNA, 0x00000000 }, /* PHY registers */ { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY(3), 0x2d849093 }, { AR5K_PHY(4), 0x7d32e000 }, { AR5K_PHY(5), 0x00000f6b }, { AR5K_PHY_ACT, 0x00000000 }, { AR5K_PHY(11), 0x00026ffe }, { AR5K_PHY(12), 0x00000000 }, { AR5K_PHY(15), 0x00020100 }, { AR5K_PHY(16), 0x206a017a }, { AR5K_PHY(19), 0x1284613c }, { AR5K_PHY(21), 0x00000859 }, { AR5K_PHY(26), 0x409a4190 }, /* 0x9868 */ { AR5K_PHY(27), 
0x050cb081 }, { AR5K_PHY(28), 0x0000000f }, { AR5K_PHY(29), 0x00000080 }, { AR5K_PHY(30), 0x0000000c }, { AR5K_PHY(64), 0x00000000 }, { AR5K_PHY(65), 0x00000000 }, { AR5K_PHY(66), 0x00000000 }, { AR5K_PHY(67), 0x00800000 }, { AR5K_PHY(68), 0x00000001 }, { AR5K_PHY(71), 0x0000092a }, { AR5K_PHY_IQ, 0x00000000 }, { AR5K_PHY(73), 0x00058a05 }, { AR5K_PHY(74), 0x00000001 }, { AR5K_PHY(75), 0x00000000 }, { AR5K_PHY_PAPD_PROBE, 0x00000000 }, { AR5K_PHY(77), 0x00000000 }, /* 0x9934 */ { AR5K_PHY(78), 0x00000000 }, /* 0x9938 */ { AR5K_PHY(79), 0x0000003f }, /* 0x993c */ { AR5K_PHY(80), 0x00000004 }, { AR5K_PHY(82), 0x00000000 }, { AR5K_PHY(83), 0x00000000 }, { AR5K_PHY(84), 0x00000000 }, { AR5K_PHY_RADAR, 0x5d50f14c }, { AR5K_PHY(86), 0x00000018 }, { AR5K_PHY(87), 0x004b6a8e }, /* Initial Power table (32bytes) * common on all cards/modes. * Note: Table is rewritten during * txpower setup later using calibration * data etc. so next write is non-common */ { AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff }, { AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff }, { AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff }, { AR5K_PHY_PCDAC_TXPOWER(4), 0x09ff09ff }, { AR5K_PHY_PCDAC_TXPOWER(5), 0x0aff0aff }, { AR5K_PHY_PCDAC_TXPOWER(6), 0x0bff0bff }, { AR5K_PHY_PCDAC_TXPOWER(7), 0x0cff0cff }, { AR5K_PHY_PCDAC_TXPOWER(8), 0x0dff0dff }, { AR5K_PHY_PCDAC_TXPOWER(9), 0x0fff0eff }, { AR5K_PHY_PCDAC_TXPOWER(10), 0x12ff12ff }, { AR5K_PHY_PCDAC_TXPOWER(11), 0x14ff13ff }, { AR5K_PHY_PCDAC_TXPOWER(12), 0x16ff15ff }, { AR5K_PHY_PCDAC_TXPOWER(13), 0x19ff17ff }, { AR5K_PHY_PCDAC_TXPOWER(14), 0x1bff1aff }, { AR5K_PHY_PCDAC_TXPOWER(15), 0x1eff1dff }, { AR5K_PHY_PCDAC_TXPOWER(16), 0x23ff20ff }, { AR5K_PHY_PCDAC_TXPOWER(17), 0x27ff25ff }, { AR5K_PHY_PCDAC_TXPOWER(18), 0x2cff29ff }, { AR5K_PHY_PCDAC_TXPOWER(19), 0x31ff2fff }, { AR5K_PHY_PCDAC_TXPOWER(20), 0x37ff34ff }, { AR5K_PHY_PCDAC_TXPOWER(21), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(22), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(23), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(24), 
0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(25), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(26), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(27), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff }, { AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff }, { AR5K_PHY_CCKTXCTL, 0x00000000 }, { AR5K_PHY(642), 0x503e4646 }, { AR5K_PHY_GAIN_2GHZ, 0x6480416c }, { AR5K_PHY(644), 0x0199a003 }, { AR5K_PHY(645), 0x044cd610 }, { AR5K_PHY(646), 0x13800040 }, { AR5K_PHY(647), 0x1be00060 }, { AR5K_PHY(648), 0x0c53800a }, { AR5K_PHY(649), 0x0014df3b }, { AR5K_PHY(650), 0x000001b5 }, { AR5K_PHY(651), 0x00000020 }, }; /* Initial mode-specific settings for AR5211 * 5211 supports OFDM-only g (draft g) but we * need to test it ! */ static const struct ath5k_ini_mode ar5211_ini_mode[] = { { AR5K_TXCFG, /* A B G */ { 0x00000015, 0x0000001d, 0x00000015 } }, { AR5K_QUEUE_DFS_LOCAL_IFS(0), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(1), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(2), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(3), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(4), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(5), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(6), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(7), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(8), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(9), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_DCU_GBL_IFS_SLOT, { 0x00000168, 0x000001b8, 0x00000168 } }, { AR5K_DCU_GBL_IFS_SIFS, { 0x00000230, 0x000000b0, 0x00000230 } }, { AR5K_DCU_GBL_IFS_EIFS, { 0x00000d98, 0x00001f48, 0x00000d98 } }, { AR5K_DCU_GBL_IFS_MISC, { 0x0000a0e0, 0x00005880, 0x0000a0e0 } }, { AR5K_TIME_OUT, { 0x04000400, 0x20003000, 0x04000400 } }, { AR5K_USEC_5211, { 
0x0e8d8fa7, 0x01608f95, 0x0e8d8fa7 } }, { AR5K_PHY(8), { 0x02020200, 0x02010200, 0x02020200 } }, { AR5K_PHY_RF_CTL2, { 0x00000e0e, 0x00000707, 0x00000e0e } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x05010000, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000007, 0x0000000b, 0x0000000b } }, { AR5K_PHY_SETTLING, { 0x1372169c, 0x137216a8, 0x1372169c } }, { AR5K_PHY_GAIN, { 0x0018ba67, 0x0018ba69, 0x0018ba69 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } }, { AR5K_PHY_AGCCOARSE, { 0x31375d5e, 0x313a5d5e, 0x31375d5e } }, { AR5K_PHY_AGCCTL, { 0x0000bd10, 0x0000bd38, 0x0000bd10 } }, { AR5K_PHY_NF, { 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, { AR5K_PHY_RX_DELAY, { 0x00002710, 0x0000157c, 0x00002710 } }, { AR5K_PHY(70), { 0x00000190, 0x00000084, 0x00000190 } }, { AR5K_PHY_FRAME_CTL_5211, { 0x6fe01020, 0x6fe00920, 0x6fe01020 } }, { AR5K_PHY_PCDAC_TXPOWER_BASE, { 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } }, { AR5K_RF_BUFFER_CONTROL_4, { 0x00000010, 0x00000010, 0x00000010 } }, }; /* Initial register settings for AR5212 and newer chips */ static const struct ath5k_ini ar5212_ini_common_start[] = { { AR5K_RXDP, 0x00000000 }, { AR5K_RXCFG, 0x00000005 }, { AR5K_MIBC, 0x00000000 }, { AR5K_TOPS, 0x00000008 }, { AR5K_RXNOFRM, 0x00000008 }, { AR5K_TXNOFRM, 0x00000010 }, { AR5K_RPGTO, 0x00000000 }, { AR5K_RFCNT, 0x0000001f }, { AR5K_QUEUE_TXDP(0), 0x00000000 }, { AR5K_QUEUE_TXDP(1), 0x00000000 }, { AR5K_QUEUE_TXDP(2), 0x00000000 }, { AR5K_QUEUE_TXDP(3), 0x00000000 }, { AR5K_QUEUE_TXDP(4), 0x00000000 }, { AR5K_QUEUE_TXDP(5), 0x00000000 }, { AR5K_QUEUE_TXDP(6), 0x00000000 }, { AR5K_QUEUE_TXDP(7), 0x00000000 }, { AR5K_QUEUE_TXDP(8), 0x00000000 }, { AR5K_QUEUE_TXDP(9), 0x00000000 }, { AR5K_DCU_FP, 0x00000000 }, { AR5K_DCU_TXP, 0x00000000 }, /* Tx filter table 0 (32 entries) */ { AR5K_DCU_TX_FILTER_0(0), 0x00000000 }, /* DCU 0 */ { AR5K_DCU_TX_FILTER_0(1), 0x00000000 
}, { AR5K_DCU_TX_FILTER_0(2), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(3), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(4), 0x00000000 }, /* DCU 1 */ { AR5K_DCU_TX_FILTER_0(5), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(6), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(7), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(8), 0x00000000 }, /* DCU 2 */ { AR5K_DCU_TX_FILTER_0(9), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(10), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(11), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(12), 0x00000000 }, /* DCU 3 */ { AR5K_DCU_TX_FILTER_0(13), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(14), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(15), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(16), 0x00000000 }, /* DCU 4 */ { AR5K_DCU_TX_FILTER_0(17), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(18), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(19), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(20), 0x00000000 }, /* DCU 5 */ { AR5K_DCU_TX_FILTER_0(21), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(22), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(23), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(24), 0x00000000 }, /* DCU 6 */ { AR5K_DCU_TX_FILTER_0(25), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(26), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(27), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(28), 0x00000000 }, /* DCU 7 */ { AR5K_DCU_TX_FILTER_0(29), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(30), 0x00000000 }, { AR5K_DCU_TX_FILTER_0(31), 0x00000000 }, /* Tx filter table 1 (16 entries) */ { AR5K_DCU_TX_FILTER_1(0), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(1), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(2), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(3), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(4), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(5), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(6), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(7), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(8), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(9), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(10), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(11), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(12), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(13), 0x00000000 }, { AR5K_DCU_TX_FILTER_1(14), 0x00000000 }, { 
AR5K_DCU_TX_FILTER_1(15), 0x00000000 }, { AR5K_DCU_TX_FILTER_CLR, 0x00000000 }, { AR5K_DCU_TX_FILTER_SET, 0x00000000 }, { AR5K_STA_ID1, 0x00000000 }, { AR5K_BSS_ID0, 0x00000000 }, { AR5K_BSS_ID1, 0x00000000 }, { AR5K_BEACON_5211, 0x00000000 }, { AR5K_CFP_PERIOD_5211, 0x00000000 }, { AR5K_TIMER0_5211, 0x00000030 }, { AR5K_TIMER1_5211, 0x0007ffff }, { AR5K_TIMER2_5211, 0x01ffffff }, { AR5K_TIMER3_5211, 0x00000031 }, { AR5K_CFP_DUR_5211, 0x00000000 }, { AR5K_RX_FILTER_5211, 0x00000000 }, { AR5K_DIAG_SW_5211, 0x00000000 }, { AR5K_ADDAC_TEST, 0x00000000 }, { AR5K_DEFAULT_ANTENNA, 0x00000000 }, { AR5K_FRAME_CTL_QOSM, 0x000fc78f }, { AR5K_XRMODE, 0x2a82301a }, { AR5K_XRDELAY, 0x05dc01e0 }, { AR5K_XRTIMEOUT, 0x1f402710 }, { AR5K_XRCHIRP, 0x01f40000 }, { AR5K_XRSTOMP, 0x00001e1c }, { AR5K_SLEEP0, 0x0002aaaa }, { AR5K_SLEEP1, 0x02005555 }, { AR5K_SLEEP2, 0x00000000 }, { AR_BSSMSKL, 0xffffffff }, { AR_BSSMSKU, 0x0000ffff }, { AR5K_TXPC, 0x00000000 }, { AR5K_PROFCNT_TX, 0x00000000 }, { AR5K_PROFCNT_RX, 0x00000000 }, { AR5K_PROFCNT_RXCLR, 0x00000000 }, { AR5K_PROFCNT_CYCLE, 0x00000000 }, { AR5K_QUIET_CTL1, 0x00000088 }, /* Initial rate duration table (32 entries )*/ { AR5K_RATE_DUR(0), 0x00000000 }, { AR5K_RATE_DUR(1), 0x0000008c }, { AR5K_RATE_DUR(2), 0x000000e4 }, { AR5K_RATE_DUR(3), 0x000002d5 }, { AR5K_RATE_DUR(4), 0x00000000 }, { AR5K_RATE_DUR(5), 0x00000000 }, { AR5K_RATE_DUR(6), 0x000000a0 }, { AR5K_RATE_DUR(7), 0x000001c9 }, { AR5K_RATE_DUR(8), 0x0000002c }, { AR5K_RATE_DUR(9), 0x0000002c }, { AR5K_RATE_DUR(10), 0x00000030 }, { AR5K_RATE_DUR(11), 0x0000003c }, { AR5K_RATE_DUR(12), 0x0000002c }, { AR5K_RATE_DUR(13), 0x0000002c }, { AR5K_RATE_DUR(14), 0x00000030 }, { AR5K_RATE_DUR(15), 0x0000003c }, { AR5K_RATE_DUR(16), 0x00000000 }, { AR5K_RATE_DUR(17), 0x00000000 }, { AR5K_RATE_DUR(18), 0x00000000 }, { AR5K_RATE_DUR(19), 0x00000000 }, { AR5K_RATE_DUR(20), 0x00000000 }, { AR5K_RATE_DUR(21), 0x00000000 }, { AR5K_RATE_DUR(22), 0x00000000 }, { AR5K_RATE_DUR(23), 0x00000000 
}, { AR5K_RATE_DUR(24), 0x000000d5 }, { AR5K_RATE_DUR(25), 0x000000df }, { AR5K_RATE_DUR(26), 0x00000102 }, { AR5K_RATE_DUR(27), 0x0000013a }, { AR5K_RATE_DUR(28), 0x00000075 }, { AR5K_RATE_DUR(29), 0x0000007f }, { AR5K_RATE_DUR(30), 0x000000a2 }, { AR5K_RATE_DUR(31), 0x00000000 }, { AR5K_QUIET_CTL2, 0x00010002 }, { AR5K_TSF_PARM, 0x00000001 }, { AR5K_QOS_NOACK, 0x000000c0 }, { AR5K_PHY_ERR_FIL, 0x00000000 }, { AR5K_XRLAT_TX, 0x00000168 }, { AR5K_ACKSIFS, 0x00000000 }, /* Rate -> db table * notice ...03<-02<-01<-00 ! */ { AR5K_RATE2DB(0), 0x03020100 }, { AR5K_RATE2DB(1), 0x07060504 }, { AR5K_RATE2DB(2), 0x0b0a0908 }, { AR5K_RATE2DB(3), 0x0f0e0d0c }, { AR5K_RATE2DB(4), 0x13121110 }, { AR5K_RATE2DB(5), 0x17161514 }, { AR5K_RATE2DB(6), 0x1b1a1918 }, { AR5K_RATE2DB(7), 0x1f1e1d1c }, /* Db -> Rate table */ { AR5K_DB2RATE(0), 0x03020100 }, { AR5K_DB2RATE(1), 0x07060504 }, { AR5K_DB2RATE(2), 0x0b0a0908 }, { AR5K_DB2RATE(3), 0x0f0e0d0c }, { AR5K_DB2RATE(4), 0x13121110 }, { AR5K_DB2RATE(5), 0x17161514 }, { AR5K_DB2RATE(6), 0x1b1a1918 }, { AR5K_DB2RATE(7), 0x1f1e1d1c }, /* PHY registers (Common settings * for all chips/modes) */ { AR5K_PHY(3), 0xad848e19 }, { AR5K_PHY(4), 0x7d28e000 }, { AR5K_PHY_TIMING_3, 0x9c0a9f6b }, { AR5K_PHY_ACT, 0x00000000 }, { AR5K_PHY(16), 0x206a017a }, { AR5K_PHY(21), 0x00000859 }, { AR5K_PHY_BIN_MASK_1, 0x00000000 }, { AR5K_PHY_BIN_MASK_2, 0x00000000 }, { AR5K_PHY_BIN_MASK_3, 0x00000000 }, { AR5K_PHY_BIN_MASK_CTL, 0x00800000 }, { AR5K_PHY_ANT_CTL, 0x00000001 }, /*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */ { AR5K_PHY_MAX_RX_LEN, 0x00000c80 }, { AR5K_PHY_IQ, 0x05100000 }, { AR5K_PHY_WARM_RESET, 0x00000001 }, { AR5K_PHY_CTL, 0x00000004 }, { AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 }, { AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d }, { AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f }, { AR5K_PHY(82), 0x9280b212 }, { AR5K_PHY_RADAR, 0x5d50e188 }, /*{ AR5K_PHY(86), 0x000000ff },*/ { AR5K_PHY(87), 0x004b6a8e }, { AR5K_PHY_NFTHRES, 0x000003ce }, { AR5K_PHY_RESTART, 
0x192fb515 }, { AR5K_PHY(94), 0x00000001 }, { AR5K_PHY_RFBUS_REQ, 0x00000000 }, /*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */ /*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */ { AR5K_PHY(644), 0x00806333 }, { AR5K_PHY(645), 0x00106c10 }, { AR5K_PHY(646), 0x009c4060 }, /* { AR5K_PHY(647), 0x1483800a }, */ /* { AR5K_PHY(648), 0x01831061 }, */ /* Old value */ { AR5K_PHY(648), 0x018830c6 }, { AR5K_PHY(649), 0x00000400 }, /*{ AR5K_PHY(650), 0x000001b5 },*/ { AR5K_PHY(651), 0x00000000 }, { AR5K_PHY_TXPOWER_RATE3, 0x20202020 }, { AR5K_PHY_TXPOWER_RATE4, 0x20202020 }, /*{ AR5K_PHY(655), 0x13c889af },*/ { AR5K_PHY(656), 0x38490a20 }, { AR5K_PHY(657), 0x00007bb6 }, { AR5K_PHY(658), 0x0fff3ffc }, }; /* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = { { AR5K_QUEUE_DFS_LOCAL_IFS(0), /* A/XR B G */ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(1), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(2), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(3), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(4), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(5), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(6), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(7), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(8), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_QUEUE_DFS_LOCAL_IFS(9), { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, { AR5K_DCU_GBL_IFS_SIFS, { 0x00000230, 0x000000b0, 0x00000160 } }, { AR5K_DCU_GBL_IFS_SLOT, { 0x00000168, 0x000001b8, 0x0000018c } }, { AR5K_DCU_GBL_IFS_EIFS, { 0x00000e60, 0x00001f1c, 0x00003e38 } }, { AR5K_DCU_GBL_IFS_MISC, { 0x0000a0e0, 0x00005880, 0x0000b0e0 } }, { AR5K_TIME_OUT, { 0x03e803e8, 0x04200420, 0x08400840 } }, { AR5K_PHY(8), { 0x02020200, 0x02010200, 0x02020200 } }, { AR5K_PHY_RF_CTL2, { 
0x00000e0e, 0x00000707, 0x00000e0e } }, { AR5K_PHY_SETTLING, { 0x1372161c, 0x13721722, 0x137216a2 } }, { AR5K_PHY_AGCCTL, { 0x00009d10, 0x00009d18, 0x00009d18 } }, { AR5K_PHY_NF, { 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, { AR5K_PHY_WEAK_OFDM_HIGH_THR, { 0x409a4190, 0x409a4190, 0x409a4190 } }, { AR5K_PHY(70), { 0x000001b8, 0x00000084, 0x00000108 } }, { AR5K_PHY_OFDM_SELFCORR, { 0x10058a05, 0x10058a05, 0x10058a05 } }, { 0xa230, { 0x00000000, 0x00000000, 0x00000108 } }, }; /* Initial mode-specific settings for AR5212 + RF5111 * (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ { 0x00008015, 0x00008015, 0x00008015 } }, { AR5K_USEC_5211, { 0x128d8fa7, 0x04e00f95, 0x12e00fab } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x05010100, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000007, 0x0000000b, 0x0000000b } }, { AR5K_PHY_GAIN, { 0x0018da5a, 0x0018ca69, 0x0018ca69 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3137665e, 0x3137665e, 0x3137665e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb080 } }, { AR5K_PHY_RX_DELAY, { 0x00002710, 0x0000157c, 0x00002af8 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81020, 0xf7b80d20, 0xf7b81020 } }, { AR5K_PHY_GAIN_2GHZ, { 0x642c416a, 0x6440416a, 0x6440416a } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1873800a, 0x1883800a } }, }; /* Common for all modes */ static const struct ath5k_ini rf5111_ini_common_end[] = { { AR5K_DCU_FP, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x00022ffe }, { 0x983c, 0x00020100 }, { AR5K_PHY_GAIN_OFFSET, 0x1284613c }, { AR5K_PHY_PAPD_PROBE, 0x00004883 }, { 0x9940, 0x00000004 }, { 0x9958, 0x000000ff }, { 0x9974, 0x00000000 }, { AR5K_PHY_SPENDING, 0x00000018 }, { AR5K_PHY_CCKTXCTL, 0x00000000 }, { AR5K_PHY_CCK_CROSSCORR, 0xd03e6788 }, { 
AR5K_PHY_DAG_CCK_CTL, 0x000001b5 }, { 0xa23c, 0x13c889af }, }; /* Initial mode-specific settings for AR5212 + RF5112 * (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5112_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ { 0x00008015, 0x00008015, 0x00008015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x04e01395, 0x12e013ab } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x05020100, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000007, 0x0000000b, 0x0000000b } }, { AR5K_PHY_GAIN, { 0x0018da6d, 0x0018ca75, 0x0018ca75 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3137665e, 0x3137665e, 0x3137665e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x0000044c, 0x00000898 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81020, 0xf7b80d10, 0xf7b81010 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000008, 0x00000008 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x642c0140, 0x6442c160, 0x6442c160 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1873800a, 0x1883800a } }, }; static const struct ath5k_ini rf5112_ini_common_end[] = { { AR5K_DCU_FP, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x00022ffe }, { 0x983c, 0x00020100 }, { AR5K_PHY_GAIN_OFFSET, 0x1284613c }, { AR5K_PHY_PAPD_PROBE, 0x00004882 }, { 0x9940, 0x00000004 }, { 0x9958, 0x000000ff }, { 0x9974, 0x00000000 }, { AR5K_PHY_DAG_CCK_CTL, 0x000001b5 }, { 0xa23c, 0x13c889af }, }; /* Initial mode-specific settings for RF5413/5414 * (Written after ar5212_ini) */ static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ { 0x00000015, 0x00000015, 0x00000015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x04e01395, 0x12e013ab } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x05020100, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 
0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000007, 0x0000000b, 0x0000000b } }, { AR5K_PHY_GAIN, { 0x0018fa61, 0x001a1a63, 0x001a1a63 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } }, { AR5K_PHY_SIG, { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3139605e, 0x3139605e, 0x3139605e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x0000044c, 0x00000898 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000000, 0x00000000 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x002ec1e0, 0x002ac120, 0x002ac120 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1863800a, 0x1883800a } }, { 0xa300, { 0x18010000, 0x18010000, 0x18010000 } }, { 0xa304, { 0x30032602, 0x30032602, 0x30032602 } }, { 0xa308, { 0x48073e06, 0x48073e06, 0x48073e06 } }, { 0xa30c, { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } }, { 0xa310, { 0x641a600f, 0x641a600f, 0x641a600f } }, { 0xa314, { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } }, { 0xa318, { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } }, { 0xa31c, { 0x90cf865b, 0x8ecf865b, 0x8ecf865b } }, { 0xa320, { 0x9d4f970f, 0x9b4f970f, 0x9b4f970f } }, { 0xa324, { 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f } }, { 0xa328, { 0xb55faf1f, 0xb35faf1f, 0xb35faf1f } }, { 0xa32c, { 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f } }, { 0xa330, { 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f } }, { 0xa334, { 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } }, }; static const struct ath5k_ini rf5413_ini_common_end[] = { { AR5K_DCU_FP, 0x000003e0 }, { AR5K_5414_CBCFG, 0x00000010 }, { AR5K_SEQ_MASK, 0x0000000f }, { 0x809c, 0x00000000 }, { 0x80a0, 0x00000000 }, { AR5K_MIC_QOS_CTL, 0x00000000 }, { AR5K_MIC_QOS_SEL, 0x00000000 }, { AR5K_MISC_MODE, 0x00000000 }, { AR5K_OFDM_FIL_CNT, 0x00000000 }, { AR5K_CCK_FIL_CNT, 0x00000000 }, { AR5K_PHYERR_CNT1, 0x00000000 }, { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, { AR5K_PHYERR_CNT2, 
0x00000000 }, { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, { AR5K_TSF_THRES, 0x00000000 }, { 0x8140, 0x800003f9 }, { 0x8144, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x0000a000 }, { 0x983c, 0x00200400 }, { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, { AR5K_PHY_SCR, 0x0000001f }, { AR5K_PHY_SLMT, 0x00000080 }, { AR5K_PHY_SCAL, 0x0000000e }, { 0x9958, 0x00081fff }, { AR5K_PHY_TIMING_7, 0x00000000 }, { AR5K_PHY_TIMING_8, 0x02800000 }, { AR5K_PHY_TIMING_11, 0x00000000 }, { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, { 0x99e4, 0xaaaaaaaa }, { 0x99e8, 0x3c466478 }, { 0x99ec, 0x000000aa }, { AR5K_PHY_SCLOCK, 0x0000000c }, { AR5K_PHY_SDELAY, 0x000000ff }, { AR5K_PHY_SPENDING, 0x00000014 }, { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, { 0xa23c, 0x93c889af }, { AR5K_PHY_FAST_ADC, 0x00000001 }, { 0xa250, 0x0000a000 }, { AR5K_PHY_BLUETOOTH, 0x00000000 }, { AR5K_PHY_TPC_RG1, 0x0cc75380 }, { 0xa25c, 0x0f0f0f01 }, { 0xa260, 0x5f690f01 }, { 0xa264, 0x00418a11 }, { 0xa268, 0x00000000 }, { AR5K_PHY_TPC_RG5, 0x0c30c16a }, { 0xa270, 0x00820820 }, { 0xa274, 0x081b7caa }, { 0xa278, 0x1ce739ce }, { 0xa27c, 0x051701ce }, { 0xa338, 0x00000000 }, { 0xa33c, 0x00000000 }, { 0xa340, 0x00000000 }, { 0xa344, 0x00000000 }, { 0xa348, 0x3fffffff }, { 0xa34c, 0x3fffffff }, { 0xa350, 0x3fffffff }, { 0xa354, 0x0003ffff }, { 0xa358, 0x79a8aa1f }, { 0xa35c, 0x066c420f }, { 0xa360, 0x0f282207 }, { 0xa364, 0x17601685 }, { 0xa368, 0x1f801104 }, { 0xa36c, 0x37a00c03 }, { 0xa370, 0x3fc40883 }, { 0xa374, 0x57c00803 }, { 0xa378, 0x5fd80682 }, { 0xa37c, 0x7fe00482 }, { 0xa380, 0x7f3c7bba }, { 0xa384, 0xf3307ff0 }, }; /* Initial mode-specific settings for RF2413/2414 * (Written after ar5212_ini) */ /* XXX: a mode ? 
*/ static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ { 0x00000015, 0x00000015, 0x00000015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x04e01395, 0x12e013ab } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x05020000, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e00, 0x00000e00, 0x00000e00 } }, { AR5K_PHY_PA_CTL, { 0x00000002, 0x0000000a, 0x0000000a } }, { AR5K_PHY_GAIN, { 0x0018da6d, 0x001a6a64, 0x001a6a64 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da } }, { AR5K_PHY_SIG, { 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3137665e, 0x3137665e, 0x3139605e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x0000044c, 0x00000898 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000000, 0x00000000 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x002c0140, 0x0042c140, 0x0042c140 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1863800a, 0x1883800a } }, }; static const struct ath5k_ini rf2413_ini_common_end[] = { { AR5K_DCU_FP, 0x000003e0 }, { AR5K_SEQ_MASK, 0x0000000f }, { AR5K_MIC_QOS_CTL, 0x00000000 }, { AR5K_MIC_QOS_SEL, 0x00000000 }, { AR5K_MISC_MODE, 0x00000000 }, { AR5K_OFDM_FIL_CNT, 0x00000000 }, { AR5K_CCK_FIL_CNT, 0x00000000 }, { AR5K_PHYERR_CNT1, 0x00000000 }, { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, { AR5K_PHYERR_CNT2, 0x00000000 }, { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, { AR5K_TSF_THRES, 0x00000000 }, { 0x8140, 0x800000a8 }, { 0x8144, 0x00000000 }, { AR5K_PHY_AGC, 0x00000000 }, { AR5K_PHY_ADC_CTL, 0x0000a000 }, { 0x983c, 0x00200400 }, { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, { AR5K_PHY_SCR, 0x0000001f }, { AR5K_PHY_SLMT, 0x00000080 }, { AR5K_PHY_SCAL, 0x0000000e }, { 0x9958, 0x000000ff }, { AR5K_PHY_TIMING_7, 0x00000000 }, { AR5K_PHY_TIMING_8, 0x02800000 }, { AR5K_PHY_TIMING_11, 0x00000000 }, { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 
}, { 0x99e4, 0xaaaaaaaa }, { 0x99e8, 0x3c466478 }, { 0x99ec, 0x000000aa }, { AR5K_PHY_SCLOCK, 0x0000000c }, { AR5K_PHY_SDELAY, 0x000000ff }, { AR5K_PHY_SPENDING, 0x00000014 }, { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, { 0xa23c, 0x93c889af }, { AR5K_PHY_FAST_ADC, 0x00000001 }, { 0xa250, 0x0000a000 }, { AR5K_PHY_BLUETOOTH, 0x00000000 }, { AR5K_PHY_TPC_RG1, 0x0cc75380 }, { 0xa25c, 0x0f0f0f01 }, { 0xa260, 0x5f690f01 }, { 0xa264, 0x00418a11 }, { 0xa268, 0x00000000 }, { AR5K_PHY_TPC_RG5, 0x0c30c16a }, { 0xa270, 0x00820820 }, { 0xa274, 0x001b7caa }, { 0xa278, 0x1ce739ce }, { 0xa27c, 0x051701ce }, { 0xa300, 0x18010000 }, { 0xa304, 0x30032602 }, { 0xa308, 0x48073e06 }, { 0xa30c, 0x560b4c0a }, { 0xa310, 0x641a600f }, { 0xa314, 0x784f6e1b }, { 0xa318, 0x868f7c5a }, { 0xa31c, 0x8ecf865b }, { 0xa320, 0x9d4f970f }, { 0xa324, 0xa5cfa18f }, { 0xa328, 0xb55faf1f }, { 0xa32c, 0xbddfb99f }, { 0xa330, 0xcd7fc73f }, { 0xa334, 0xd5ffd1bf }, { 0xa338, 0x00000000 }, { 0xa33c, 0x00000000 }, { 0xa340, 0x00000000 }, { 0xa344, 0x00000000 }, { 0xa348, 0x3fffffff }, { 0xa34c, 0x3fffffff }, { 0xa350, 0x3fffffff }, { 0xa354, 0x0003ffff }, { 0xa358, 0x79a8aa1f }, { 0xa35c, 0x066c420f }, { 0xa360, 0x0f282207 }, { 0xa364, 0x17601685 }, { 0xa368, 0x1f801104 }, { 0xa36c, 0x37a00c03 }, { 0xa370, 0x3fc40883 }, { 0xa374, 0x57c00803 }, { 0xa378, 0x5fd80682 }, { 0xa37c, 0x7fe00482 }, { 0xa380, 0x7f3c7bba }, { 0xa384, 0xf3307ff0 }, }; /* Initial mode-specific settings for RF2425 * (Written after ar5212_ini) */ /* XXX: a mode ? 
*/ static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { { AR5K_TXCFG, /* A/XR B G */ { 0x00000015, 0x00000015, 0x00000015 } }, { AR5K_USEC_5211, { 0x128d93a7, 0x04e01395, 0x12e013ab } }, { AR5K_PHY_RF_CTL3, { 0x0a020001, 0x05020100, 0x0a020001 } }, { AR5K_PHY_RF_CTL4, { 0x00000e0e, 0x00000e0e, 0x00000e0e } }, { AR5K_PHY_PA_CTL, { 0x00000003, 0x0000000b, 0x0000000b } }, { AR5K_PHY_SETTLING, { 0x1372161c, 0x13721722, 0x13721422 } }, { AR5K_PHY_GAIN, { 0x0018fa61, 0x00199a65, 0x00199a65 } }, { AR5K_PHY_DESIRED_SIZE, { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } }, { AR5K_PHY_SIG, { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, { AR5K_PHY_AGCCOARSE, { 0x3139605e, 0x3139605e, 0x3139605e } }, { AR5K_PHY_WEAK_OFDM_LOW_THR, { 0x050cb081, 0x050cb081, 0x050cb081 } }, { AR5K_PHY_RX_DELAY, { 0x000007d0, 0x0000044c, 0x00000898 } }, { AR5K_PHY_FRAME_CTL_5211, { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } }, { AR5K_PHY_CCKTXCTL, { 0x00000000, 0x00000000, 0x00000000 } }, { AR5K_PHY_CCK_CROSSCORR, { 0xd6be6788, 0xd03e6788, 0xd03e6788 } }, { AR5K_PHY_GAIN_2GHZ, { 0x00000140, 0x0052c140, 0x0052c140 } }, { AR5K_PHY_CCK_RX_CTL_4, { 0x1883800a, 0x1863800a, 0x1883800a } }, { 0xa324, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa328, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa32c, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa330, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, { 0xa334, { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, }; static const struct ath5k_ini rf2425_ini_common_end[] = { { AR5K_DCU_FP, 0x000003e0 }, { AR5K_SEQ_MASK, 0x0000000f }, { 0x809c, 0x00000000 }, { 0x80a0, 0x00000000 }, { AR5K_MIC_QOS_CTL, 0x00000000 }, { AR5K_MIC_QOS_SEL, 0x00000000 }, { AR5K_MISC_MODE, 0x00000000 }, { AR5K_OFDM_FIL_CNT, 0x00000000 }, { AR5K_CCK_FIL_CNT, 0x00000000 }, { AR5K_PHYERR_CNT1, 0x00000000 }, { AR5K_PHYERR_CNT1_MASK, 0x00000000 }, { AR5K_PHYERR_CNT2, 0x00000000 }, { AR5K_PHYERR_CNT2_MASK, 0x00000000 }, { AR5K_TSF_THRES, 0x00000000 }, { 0x8140, 0x800003f9 }, { 0x8144, 0x00000000 }, { AR5K_PHY_AGC, 
0x00000000 }, { AR5K_PHY_ADC_CTL, 0x0000a000 }, { 0x983c, 0x00200400 }, { AR5K_PHY_GAIN_OFFSET, 0x1284233c }, { AR5K_PHY_SCR, 0x0000001f }, { AR5K_PHY_SLMT, 0x00000080 }, { AR5K_PHY_SCAL, 0x0000000e }, { 0x9958, 0x00081fff }, { AR5K_PHY_TIMING_7, 0x00000000 }, { AR5K_PHY_TIMING_8, 0x02800000 }, { AR5K_PHY_TIMING_11, 0x00000000 }, { 0x99dc, 0xfebadbe8 }, { AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 }, { 0x99e4, 0xaaaaaaaa }, { 0x99e8, 0x3c466478 }, { 0x99ec, 0x000000aa }, { AR5K_PHY_SCLOCK, 0x0000000c }, { AR5K_PHY_SDELAY, 0x000000ff }, { AR5K_PHY_SPENDING, 0x00000014 }, { AR5K_PHY_DAG_CCK_CTL, 0x000009b5 }, { AR5K_PHY_TXPOWER_RATE3, 0x20202020 }, { AR5K_PHY_TXPOWER_RATE4, 0x20202020 }, { 0xa23c, 0x93c889af }, { AR5K_PHY_FAST_ADC, 0x00000001 }, { 0xa250, 0x0000a000 }, { AR5K_PHY_BLUETOOTH, 0x00000000 }, { AR5K_PHY_TPC_RG1, 0x0cc75380 }, { 0xa25c, 0x0f0f0f01 }, { 0xa260, 0x5f690f01 }, { 0xa264, 0x00418a11 }, { 0xa268, 0x00000000 }, { AR5K_PHY_TPC_RG5, 0x0c30c166 }, { 0xa270, 0x00820820 }, { 0xa274, 0x081a3caa }, { 0xa278, 0x1ce739ce }, { 0xa27c, 0x051701ce }, { 0xa300, 0x16010000 }, { 0xa304, 0x2c032402 }, { 0xa308, 0x48433e42 }, { 0xa30c, 0x5a0f500b }, { 0xa310, 0x6c4b624a }, { 0xa314, 0x7e8b748a }, { 0xa318, 0x96cf8ccb }, { 0xa31c, 0xa34f9d0f }, { 0xa320, 0xa7cfa58f }, { 0xa348, 0x3fffffff }, { 0xa34c, 0x3fffffff }, { 0xa350, 0x3fffffff }, { 0xa354, 0x0003ffff }, { 0xa358, 0x79a8aa1f }, { 0xa35c, 0x066c420f }, { 0xa360, 0x0f282207 }, { 0xa364, 0x17601685 }, { 0xa368, 0x1f801104 }, { 0xa36c, 0x37a00c03 }, { 0xa370, 0x3fc40883 }, { 0xa374, 0x57c00803 }, { 0xa378, 0x5fd80682 }, { 0xa37c, 0x7fe00482 }, { 0xa380, 0x7f3c7bba }, { 0xa384, 0xf3307ff0 }, }; /* * Initial BaseBand Gain settings for RF5111/5112 (AR5210 comes with * RF5110 only so initial BB Gain settings are included in AR5K_AR5210_INI) */ /* RF5111 Initial BaseBand Gain settings */ static const struct ath5k_ini rf5111_ini_bbgain[] = { { AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000020 }, { 
AR5K_BB_GAIN(2), 0x00000010 }, { AR5K_BB_GAIN(3), 0x00000030 }, { AR5K_BB_GAIN(4), 0x00000008 }, { AR5K_BB_GAIN(5), 0x00000028 }, { AR5K_BB_GAIN(6), 0x00000004 }, { AR5K_BB_GAIN(7), 0x00000024 }, { AR5K_BB_GAIN(8), 0x00000014 }, { AR5K_BB_GAIN(9), 0x00000034 }, { AR5K_BB_GAIN(10), 0x0000000c }, { AR5K_BB_GAIN(11), 0x0000002c }, { AR5K_BB_GAIN(12), 0x00000002 }, { AR5K_BB_GAIN(13), 0x00000022 }, { AR5K_BB_GAIN(14), 0x00000012 }, { AR5K_BB_GAIN(15), 0x00000032 }, { AR5K_BB_GAIN(16), 0x0000000a }, { AR5K_BB_GAIN(17), 0x0000002a }, { AR5K_BB_GAIN(18), 0x00000006 }, { AR5K_BB_GAIN(19), 0x00000026 }, { AR5K_BB_GAIN(20), 0x00000016 }, { AR5K_BB_GAIN(21), 0x00000036 }, { AR5K_BB_GAIN(22), 0x0000000e }, { AR5K_BB_GAIN(23), 0x0000002e }, { AR5K_BB_GAIN(24), 0x00000001 }, { AR5K_BB_GAIN(25), 0x00000021 }, { AR5K_BB_GAIN(26), 0x00000011 }, { AR5K_BB_GAIN(27), 0x00000031 }, { AR5K_BB_GAIN(28), 0x00000009 }, { AR5K_BB_GAIN(29), 0x00000029 }, { AR5K_BB_GAIN(30), 0x00000005 }, { AR5K_BB_GAIN(31), 0x00000025 }, { AR5K_BB_GAIN(32), 0x00000015 }, { AR5K_BB_GAIN(33), 0x00000035 }, { AR5K_BB_GAIN(34), 0x0000000d }, { AR5K_BB_GAIN(35), 0x0000002d }, { AR5K_BB_GAIN(36), 0x00000003 }, { AR5K_BB_GAIN(37), 0x00000023 }, { AR5K_BB_GAIN(38), 0x00000013 }, { AR5K_BB_GAIN(39), 0x00000033 }, { AR5K_BB_GAIN(40), 0x0000000b }, { AR5K_BB_GAIN(41), 0x0000002b }, { AR5K_BB_GAIN(42), 0x0000002b }, { AR5K_BB_GAIN(43), 0x0000002b }, { AR5K_BB_GAIN(44), 0x0000002b }, { AR5K_BB_GAIN(45), 0x0000002b }, { AR5K_BB_GAIN(46), 0x0000002b }, { AR5K_BB_GAIN(47), 0x0000002b }, { AR5K_BB_GAIN(48), 0x0000002b }, { AR5K_BB_GAIN(49), 0x0000002b }, { AR5K_BB_GAIN(50), 0x0000002b }, { AR5K_BB_GAIN(51), 0x0000002b }, { AR5K_BB_GAIN(52), 0x0000002b }, { AR5K_BB_GAIN(53), 0x0000002b }, { AR5K_BB_GAIN(54), 0x0000002b }, { AR5K_BB_GAIN(55), 0x0000002b }, { AR5K_BB_GAIN(56), 0x0000002b }, { AR5K_BB_GAIN(57), 0x0000002b }, { AR5K_BB_GAIN(58), 0x0000002b }, { AR5K_BB_GAIN(59), 0x0000002b }, { AR5K_BB_GAIN(60), 0x0000002b }, { 
AR5K_BB_GAIN(61), 0x0000002b }, { AR5K_BB_GAIN(62), 0x00000002 }, { AR5K_BB_GAIN(63), 0x00000016 }, }; /* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414+) */ static const struct ath5k_ini rf5112_ini_bbgain[] = { { AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000001 }, { AR5K_BB_GAIN(2), 0x00000002 }, { AR5K_BB_GAIN(3), 0x00000003 }, { AR5K_BB_GAIN(4), 0x00000004 }, { AR5K_BB_GAIN(5), 0x00000005 }, { AR5K_BB_GAIN(6), 0x00000008 }, { AR5K_BB_GAIN(7), 0x00000009 }, { AR5K_BB_GAIN(8), 0x0000000a }, { AR5K_BB_GAIN(9), 0x0000000b }, { AR5K_BB_GAIN(10), 0x0000000c }, { AR5K_BB_GAIN(11), 0x0000000d }, { AR5K_BB_GAIN(12), 0x00000010 }, { AR5K_BB_GAIN(13), 0x00000011 }, { AR5K_BB_GAIN(14), 0x00000012 }, { AR5K_BB_GAIN(15), 0x00000013 }, { AR5K_BB_GAIN(16), 0x00000014 }, { AR5K_BB_GAIN(17), 0x00000015 }, { AR5K_BB_GAIN(18), 0x00000018 }, { AR5K_BB_GAIN(19), 0x00000019 }, { AR5K_BB_GAIN(20), 0x0000001a }, { AR5K_BB_GAIN(21), 0x0000001b }, { AR5K_BB_GAIN(22), 0x0000001c }, { AR5K_BB_GAIN(23), 0x0000001d }, { AR5K_BB_GAIN(24), 0x00000020 }, { AR5K_BB_GAIN(25), 0x00000021 }, { AR5K_BB_GAIN(26), 0x00000022 }, { AR5K_BB_GAIN(27), 0x00000023 }, { AR5K_BB_GAIN(28), 0x00000024 }, { AR5K_BB_GAIN(29), 0x00000025 }, { AR5K_BB_GAIN(30), 0x00000028 }, { AR5K_BB_GAIN(31), 0x00000029 }, { AR5K_BB_GAIN(32), 0x0000002a }, { AR5K_BB_GAIN(33), 0x0000002b }, { AR5K_BB_GAIN(34), 0x0000002c }, { AR5K_BB_GAIN(35), 0x0000002d }, { AR5K_BB_GAIN(36), 0x00000030 }, { AR5K_BB_GAIN(37), 0x00000031 }, { AR5K_BB_GAIN(38), 0x00000032 }, { AR5K_BB_GAIN(39), 0x00000033 }, { AR5K_BB_GAIN(40), 0x00000034 }, { AR5K_BB_GAIN(41), 0x00000035 }, { AR5K_BB_GAIN(42), 0x00000035 }, { AR5K_BB_GAIN(43), 0x00000035 }, { AR5K_BB_GAIN(44), 0x00000035 }, { AR5K_BB_GAIN(45), 0x00000035 }, { AR5K_BB_GAIN(46), 0x00000035 }, { AR5K_BB_GAIN(47), 0x00000035 }, { AR5K_BB_GAIN(48), 0x00000035 }, { AR5K_BB_GAIN(49), 0x00000035 }, { AR5K_BB_GAIN(50), 0x00000035 }, { AR5K_BB_GAIN(51), 0x00000035 }, { 
AR5K_BB_GAIN(52), 0x00000035 }, { AR5K_BB_GAIN(53), 0x00000035 },
	{ AR5K_BB_GAIN(54), 0x00000035 }, { AR5K_BB_GAIN(55), 0x00000035 },
	{ AR5K_BB_GAIN(56), 0x00000035 }, { AR5K_BB_GAIN(57), 0x00000035 },
	{ AR5K_BB_GAIN(58), 0x00000035 }, { AR5K_BB_GAIN(59), 0x00000035 },
	{ AR5K_BB_GAIN(60), 0x00000035 }, { AR5K_BB_GAIN(61), 0x00000035 },
	{ AR5K_BB_GAIN(62), 0x00000010 }, { AR5K_BB_GAIN(63), 0x0000001a },
};

/**
 * ath5k_hw_ini_registers() - Write initial register dump common for all modes
 * @ah: The &struct ath5k_hw
 * @size: Dump size (number of entries in @ini_regs)
 * @ini_regs: The array of &struct ath5k_ini
 * @skip_pcu: Skip PCU registers
 *
 * Replays a table of register/value pairs.  Entries flagged
 * AR5K_INI_READ are read (and discarded) instead of written — the
 * comment below says those registers are cleared on read.  When
 * @skip_pcu is set, any register inside [AR5K_PCU_MIN, AR5K_PCU_MAX]
 * is left untouched.
 */
static void
ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
		const struct ath5k_ini *ini_regs, bool skip_pcu)
{
	unsigned int i;

	/* Write initial registers */
	for (i = 0; i < size; i++) {
		/* Skip PCU registers if requested */
		if (skip_pcu &&
				ini_regs[i].ini_register >= AR5K_PCU_MIN &&
				ini_regs[i].ini_register <= AR5K_PCU_MAX)
			continue;

		switch (ini_regs[i].ini_mode) {
		case AR5K_INI_READ:
			/* Cleared on read */
			ath5k_hw_reg_read(ah, ini_regs[i].ini_register);
			break;
		case AR5K_INI_WRITE:
			/* fallthrough: unknown modes are treated as writes */
		default:
			/* NOTE(review): AR5K_REG_WAIT(i) presumably throttles
			 * long bursts of back-to-back writes — confirm against
			 * the macro definition in ath5k.h */
			AR5K_REG_WAIT(i);
			ath5k_hw_reg_write(ah, ini_regs[i].ini_value,
					ini_regs[i].ini_register);
		}
	}
}

/**
 * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
 * @ah: The &struct ath5k_hw
 * @size: Dump size (number of entries in @ini_mode)
 * @ini_mode: The array of &struct ath5k_ini_mode
 * @mode: One of enum ath5k_driver_mode; indexes each entry's
 *        mode_value[] column (A/XR, B, G per the table comments)
 *
 * Unconditional variant of ath5k_hw_ini_registers(): every entry is
 * written, using the value column selected by @mode.
 */
static void
ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
		unsigned int size, const struct ath5k_ini_mode *ini_mode,
		u8 mode)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		AR5K_REG_WAIT(i);
		ath5k_hw_reg_write(ah, ini_mode[i].mode_value[mode],
				(u32)ini_mode[i].mode_register);
	}
}

/**
 * ath5k_hw_write_initvals() - Write initial chip-specific register dump
 * @ah: The &struct ath5k_hw
 * @mode: One of enum ath5k_driver_mode
 * @skip_pcu: Skip PCU registers
 *
 * Write initial chip-specific register dump, to get the chipset on a
 * clean and ready-to-work state after warm reset.
 *
 * Return: 0 on success, -EINVAL for an unknown AR5212-family radio or
 * an unsupported @mode on AR5211.  MAC versions other than
 * AR5212/AR5211/AR5210 are silently ignored (returns 0).
 */
int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
	/*
	 * Write initial register settings
	 */

	/* For AR5212 and compatible */
	if (ah->ah_version == AR5K_AR5212) {

		/* First set of mode-specific settings */
		ath5k_hw_ini_mode_registers(ah,
			ARRAY_SIZE(ar5212_ini_mode_start),
			ar5212_ini_mode_start, mode);

		/*
		 * Write initial settings common for all modes
		 */
		ath5k_hw_ini_registers(ah,
			ARRAY_SIZE(ar5212_ini_common_start),
			ar5212_ini_common_start, skip_pcu);

		/* Second set of mode-specific settings, selected by the
		 * RF chip bundled with this AR5212 */
		switch (ah->ah_radio) {
		case AR5K_RF5111:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf5111_ini_mode_end),
					rf5111_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5111_ini_common_end),
					rf5111_ini_common_end, skip_pcu);

			/* Baseband gain table */
			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5111_ini_bbgain),
					rf5111_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF5112:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf5112_ini_mode_end),
					rf5112_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_common_end),
					rf5112_ini_common_end, skip_pcu);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF5413:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf5413_ini_mode_end),
					rf5413_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5413_ini_common_end),
					rf5413_ini_common_end, skip_pcu);

			/* RF5413+ reuses the RF5112 baseband gain table
			 * (see the rf5112_ini_bbgain comment above) */
			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF2316:
		case AR5K_RF2413:

			/* RF2316 shares the RF2413 tables, with two
			 * register overrides applied below */
			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf2413_ini_mode_end),
					rf2413_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf2413_ini_common_end),
					rf2413_ini_common_end, skip_pcu);

			/* Override settings from rf2413_ini_common_end */
			if (ah->ah_radio == AR5K_RF2316) {
				ath5k_hw_reg_write(ah, 0x00004000,
							AR5K_PHY_AGC);
				ath5k_hw_reg_write(ah, 0x081b7caa,
							0xa274);
			}

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF2317:

			/* RF2317 mixes RF2413 mode tables with the RF2425
			 * common table, then patches several registers */
			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf2413_ini_mode_end),
					rf2413_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf2425_ini_common_end),
					rf2425_ini_common_end, skip_pcu);

			/* Override settings from rf2413_ini_mode_end */
			ath5k_hw_reg_write(ah, 0x00180a65, AR5K_PHY_GAIN);

			/* Override settings from rf2413_ini_common_end */
			ath5k_hw_reg_write(ah, 0x00004000, AR5K_PHY_AGC);
			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TPC_RG5,
				AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP, 0xa);
			ath5k_hw_reg_write(ah, 0x800000a8, 0x8140);
			ath5k_hw_reg_write(ah, 0x000000ff, 0x9958);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF2425:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf2425_ini_mode_end),
					rf2425_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf2425_ini_common_end),
					rf2425_ini_common_end, skip_pcu);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);

			break;
		default:
			return -EINVAL;
		}

	/* For AR5211 */
	} else if (ah->ah_version == AR5K_AR5211) {

		/* AR5K_MODE_11B */
		if (mode > 2) {
			ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
			return -EINVAL;
		}

		/* Mode-specific settings */
		ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(ar5211_ini_mode),
				ar5211_ini_mode, mode);

		/*
		 * Write initial settings common for all modes
		 */
		ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini),
				ar5211_ini, skip_pcu);

		/* AR5211 only comes with 5111 */

		/* Baseband gain table */
		ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain),
				rf5111_ini_bbgain, skip_pcu);

	/* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */
	} else if (ah->ah_version == AR5K_AR5210) {
		ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini),
				ar5210_ini, skip_pcu);
	}

	return 0;
}
gpl-2.0
XiaoJiang/linux-3.4.4
net/sctp/proc.c
5180
13873
/* SCTP kernel implementation * Copyright (c) 2003 International Business Machines, Corp. * * This file is part of the SCTP kernel implementation * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * Sridhar Samudrala <sri@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
 */

#include <linux/types.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/export.h>
#include <net/sctp/sctp.h>
#include <net/ip.h> /* for snmp_fold_field */

/*
 * Name/counter-id pairs for every SCTP MIB statistic exported via
 * /proc/net/sctp/snmp; terminated by SNMP_MIB_SENTINEL (NULL name),
 * which is what the iteration in sctp_snmp_seq_show() stops on.
 */
static const struct snmp_mib sctp_snmp_list[] = {
	SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
	SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
	SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
	SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
	SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
	SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
	SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
	SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
	SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
	SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
	SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
	SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
	SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
	SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
	SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
	SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
	SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
	SNMP_MIB_ITEM("SctpT1InitExpireds", SCTP_MIB_T1_INIT_EXPIREDS),
	SNMP_MIB_ITEM("SctpT1CookieExpireds", SCTP_MIB_T1_COOKIE_EXPIREDS),
	SNMP_MIB_ITEM("SctpT2ShutdownExpireds", SCTP_MIB_T2_SHUTDOWN_EXPIREDS),
	SNMP_MIB_ITEM("SctpT3RtxExpireds", SCTP_MIB_T3_RTX_EXPIREDS),
	SNMP_MIB_ITEM("SctpT4RtoExpireds", SCTP_MIB_T4_RTO_EXPIREDS),
	SNMP_MIB_ITEM("SctpT5ShutdownGuardExpireds",
		      SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS),
	SNMP_MIB_ITEM("SctpDelaySackExpireds", SCTP_MIB_DELAY_SACK_EXPIREDS),
	SNMP_MIB_ITEM("SctpAutocloseExpireds", SCTP_MIB_AUTOCLOSE_EXPIREDS),
	SNMP_MIB_ITEM("SctpT3Retransmits", SCTP_MIB_T3_RETRANSMITS),
	SNMP_MIB_ITEM("SctpPmtudRetransmits", SCTP_MIB_PMTUD_RETRANSMITS),
	SNMP_MIB_ITEM("SctpFastRetransmits", SCTP_MIB_FAST_RETRANSMITS),
	SNMP_MIB_ITEM("SctpInPktSoftirq", SCTP_MIB_IN_PKT_SOFTIRQ),
	SNMP_MIB_ITEM("SctpInPktBacklog", SCTP_MIB_IN_PKT_BACKLOG),
	SNMP_MIB_ITEM("SctpInPktDiscards", SCTP_MIB_IN_PKT_DISCARDS),
	SNMP_MIB_ITEM("SctpInDataChunkDiscards", SCTP_MIB_IN_DATA_CHUNK_DISCARDS),
	SNMP_MIB_SENTINEL
};

/* Display sctp snmp mib statistics(/proc/net/sctp/snmp).
 * One "name<TAB>value" line per counter; snmp_fold_field() sums the
 * per-cpu copies of each statistic. */
static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
{
	int i;

	for (i = 0; sctp_snmp_list[i].name != NULL; i++)
		seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
			   snmp_fold_field((void __percpu **)sctp_statistics,
				      sctp_snmp_list[i].entry));

	return 0;
}

/* Initialize the seq file operations for 'snmp' object.
 * single_open: the whole output is produced by one show() call. */
static int sctp_snmp_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, sctp_snmp_seq_show, NULL);
}

static const struct file_operations sctp_snmp_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = sctp_snmp_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

/* Set up the proc fs entry for 'snmp' object.
 * Returns 0 on success, -ENOMEM if the proc entry cannot be created. */
int __init sctp_snmp_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("snmp", S_IRUGO, proc_net_sctp,
			&sctp_snmp_seq_fops);
	if (!p)
		return -ENOMEM;

	return 0;
}

/* Cleanup the proc fs entry for 'snmp' object. */
void sctp_snmp_proc_exit(void)
{
	remove_proc_entry("snmp", proc_net_sctp);
}

/* Dump local addresses of an association/endpoint.
 * For an association, the primary path's source address is prefixed
 * with '*'; plain endpoints have no primary (primary stays NULL). */
static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb)
{
	struct sctp_association *asoc;
	struct sctp_sockaddr_entry *laddr;
	struct sctp_transport *peer;
	union sctp_addr *addr, *primary = NULL;
	struct sctp_af *af;

	if (epb->type == SCTP_EP_TYPE_ASSOCIATION) {
	    asoc = sctp_assoc(epb);
	    peer = asoc->peer.primary_path;
	    primary = &peer->saddr;
	}

	list_for_each_entry(laddr, &epb->bind_addr.address_list, list) {
		addr = &laddr->a;
		af = sctp_get_af_specific(addr->sa.sa_family);
		if (primary && af->cmp_addr(addr, primary)) {
			seq_printf(seq, "*");
		}
		af->seq_dump_addr(seq, addr);
	}
}

/* Dump remote addresses of an association.
 * The peer's primary address is prefixed with '*'. */
static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc)
{
	struct sctp_transport *transport;
	union sctp_addr *addr, *primary;
	struct sctp_af *af;

	primary = &assoc->peer.primary_addr;
	list_for_each_entry(transport, &assoc->peer.transport_addr_list,
			transports) {
		addr = &transport->ipaddr;
		af = sctp_get_af_specific(addr->sa.sa_family);
		if (af->cmp_addr(addr, primary)) {
			seq_printf(seq, "*");
		}
		af->seq_dump_addr(seq, addr);
	}
}

/* seq_file start: *pos walks the endpoint hash buckets; the header
 * row is emitted once, when iteration starts at bucket 0. */
static void *sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sctp_ep_hashsize)
		return NULL;

	if (*pos < 0)
		*pos = 0;

	if (*pos == 0)
		seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");

	return (void *)pos;
}

/* seq_file stop: nothing to release — locking is per-bucket in show(). */
static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
{
}

/* seq_file next: advance to the next hash bucket, NULL at the end. */
static void *sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	if (++*pos >= sctp_ep_hashsize)
		return NULL;

	return pos;
}

/* Display sctp endpoints (/proc/net/sctp/eps).
 * v is a pointer to the current bucket index; every endpoint chained
 * in that bucket is printed under the bucket's read lock with BHs off. */
static int sctp_eps_seq_show(struct seq_file *seq, void *v)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_endpoint *ep;
	struct sock *sk;
	struct hlist_node *node;
	int hash = *(loff_t *)v;

	/* NOTE(review): -ENOMEM looks odd for an out-of-range bucket
	 * index — likely historical; verify against upstream. */
	if (hash >= sctp_ep_hashsize)
		return -ENOMEM;

	head = &sctp_ep_hashtable[hash];
	sctp_local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		ep = sctp_ep(epb);
		sk = epb->sk;
		seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
			   sctp_sk(sk)->type, sk->sk_state, hash,
			   epb->bind_addr.port,
			   sock_i_uid(sk), sock_i_ino(sk));

		sctp_seq_dump_local_addrs(seq, epb);
		seq_printf(seq, "\n");
	}
	read_unlock(&head->lock);
	sctp_local_bh_enable();

	return 0;
}

static const struct seq_operations sctp_eps_ops = {
	.start = sctp_eps_seq_start,
	.next  = sctp_eps_seq_next,
	.stop  = sctp_eps_seq_stop,
	.show  = sctp_eps_seq_show,
};

/* Initialize the seq file operations for 'eps' object. */
static int sctp_eps_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &sctp_eps_ops);
}

/* NOTE(review): unlike sctp_snmp_seq_fops above, no .owner =
 * THIS_MODULE here — verify whether that is intentional. */
static const struct file_operations sctp_eps_seq_fops = {
	.open	 = sctp_eps_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

/* Set up the proc fs entry for 'eps' object. */
int __init sctp_eps_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops);
	if (!p)
		return -ENOMEM;

	return 0;
}

/* Cleanup the proc fs entry for 'eps' object. */
void sctp_eps_proc_exit(void)
{
	remove_proc_entry("eps", proc_net_sctp);
}

/* seq_file start for /proc/net/sctp/assocs: *pos walks the
 * association hash buckets; header row printed once at bucket 0. */
static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sctp_assoc_hashsize)
		return NULL;

	if (*pos < 0)
		*pos = 0;

	if (*pos == 0)
		seq_printf(seq, " ASSOC SOCK STY SST ST HBKT "
				"ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
				"RPORT LADDRS <-> RADDRS "
				"HBINT INS OUTS MAXRT T1X T2X RTXC\n");

	return (void *)pos;
}

/* seq_file stop: nothing to release. */
static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
{
}

/* seq_file next: advance to the next association hash bucket. */
static void *sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	if (++*pos >= sctp_assoc_hashsize)
		return NULL;

	return pos;
}

/* Display sctp associations (/proc/net/sctp/assocs).
 * One line per association in the current hash bucket, printed under
 * the bucket's read lock with BHs disabled. */
static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *assoc;
	struct sock *sk;
	struct hlist_node *node;
	int hash = *(loff_t *)v;

	if (hash >= sctp_assoc_hashsize)
		return -ENOMEM;

	head = &sctp_assoc_hashtable[hash];
	sctp_local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		assoc = sctp_assoc(epb);
		sk = epb->sk;
		seq_printf(seq,
			   "%8pK %8pK %-3d %-3d %-2d %-4d "
			   "%4d %8d %8d %7d %5lu %-5d %5d ",
			   assoc, sk, sctp_sk(sk)->type, sk->sk_state,
			   assoc->state, hash,
			   assoc->assoc_id,
			   assoc->sndbuf_used,
			   atomic_read(&assoc->rmem_alloc),
			   sock_i_uid(sk), sock_i_ino(sk),
			   epb->bind_addr.port,
			   assoc->peer.port);
		seq_printf(seq, " ");
		sctp_seq_dump_local_addrs(seq, epb);
		seq_printf(seq, "<-> ");
		sctp_seq_dump_remote_addrs(seq, assoc);
		seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d ",
			assoc->hbinterval, assoc->c.sinit_max_instreams,
			assoc->c.sinit_num_ostreams, assoc->max_retrans,
			assoc->init_retries, assoc->shutdown_retries,
			assoc->rtx_data_chunks);
		seq_printf(seq, "\n");
	}
	read_unlock(&head->lock);
	sctp_local_bh_enable();

	return 0;
}

static const struct seq_operations sctp_assoc_ops = {
	.start = sctp_assocs_seq_start,
	.next  = sctp_assocs_seq_next,
	.stop  = sctp_assocs_seq_stop,
	.show  = sctp_assocs_seq_show,
};

/* Initialize the seq file operations for 'assocs' object. */
static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &sctp_assoc_ops);
}

/* NOTE(review): no .owner = THIS_MODULE (the 'snmp' fops sets it) —
 * verify whether that is intentional. */
static const struct file_operations sctp_assocs_seq_fops = {
	.open	 = sctp_assocs_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

/* Set up the proc fs entry for 'assocs' object. */
int __init sctp_assocs_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("assocs", S_IRUGO, proc_net_sctp,
			&sctp_assocs_seq_fops);
	if (!p)
		return -ENOMEM;

	return 0;
}

/* Cleanup the proc fs entry for 'assocs' object. */
void sctp_assocs_proc_exit(void)
{
	remove_proc_entry("assocs", proc_net_sctp);
}

/* seq_file start for /proc/net/sctp/remaddr: *pos walks the
 * association hash buckets; header row printed once at bucket 0. */
static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= sctp_assoc_hashsize)
		return NULL;

	if (*pos < 0)
		*pos = 0;

	if (*pos == 0)
		seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
				"REM_ADDR_RTX START\n");

	return (void *)pos;
}

/* seq_file next: advance to the next association hash bucket. */
static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	if (++*pos >= sctp_assoc_hashsize)
		return NULL;

	return pos;
}

/* seq_file stop: nothing to release. */
static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
{
}

/* Display one line per remote transport address of every association
 * in the current hash bucket (/proc/net/sctp/remaddr). */
static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *assoc;
	struct hlist_node *node;
	struct sctp_transport *tsp;
	int hash = *(loff_t *)v;

	if (hash >= sctp_assoc_hashsize)
		return -ENOMEM;

	head = &sctp_assoc_hashtable[hash];
	sctp_local_bh_disable();
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		assoc = sctp_assoc(epb);
		list_for_each_entry(tsp, &assoc->peer.transport_addr_list,
					transports) {
			/*
			 * The remote address (ADDR)
			 */
			tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr);
			seq_printf(seq, " ");

			/*
			 * The association ID (ASSOC_ID)
			 */
			seq_printf(seq, "%d ", tsp->asoc->assoc_id);

			/*
			 * If the Heartbeat is active (HB_ACT)
			 * Note: 1 = Active, 0 = Inactive
			 */
			seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer));

			/*
			 * Retransmit time out (RTO)
			 */
			seq_printf(seq, "%lu ", tsp->rto);

			/*
			 * Maximum path retransmit count (PATH_MAX_RTX)
			 */
			seq_printf(seq, "%d ", tsp->pathmaxrxt);

			/*
			 * remote address retransmit count (REM_ADDR_RTX)
			 * Note: We don't have a way to tally this at the moment
			 * so lets just leave it as zero for the moment
			 */
			seq_printf(seq, "0 ");

			/*
			 * remote address start time (START). This is also not
			 * currently implemented, but we can record it with a
			 * jiffies marker in a subsequent patch
			 */
			seq_printf(seq, "0");

			seq_printf(seq, "\n");
		}
	}

	read_unlock(&head->lock);
	sctp_local_bh_enable();

	return 0;
}

static const struct seq_operations sctp_remaddr_ops = {
	.start = sctp_remaddr_seq_start,
	.next  = sctp_remaddr_seq_next,
	.stop  = sctp_remaddr_seq_stop,
	.show  = sctp_remaddr_seq_show,
};

/* Cleanup the proc fs entry for 'remaddr' object. */
void sctp_remaddr_proc_exit(void)
{
	remove_proc_entry("remaddr", proc_net_sctp);
}

/* Initialize the seq file operations for 'remaddr' object. */
static int sctp_remaddr_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &sctp_remaddr_ops);
}

static const struct file_operations sctp_remaddr_seq_fops = {
	.open	 = sctp_remaddr_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

/* Set up the proc fs entry for 'remaddr' object. */
int __init sctp_remaddr_proc_init(void)
{
	struct proc_dir_entry *p;

	p = proc_create("remaddr", S_IRUGO, proc_net_sctp,
			&sctp_remaddr_seq_fops);
	if (!p)
		return -ENOMEM;

	return 0;
}
gpl-2.0
XirXes/pyramid-3.4.10
arch/arm/plat-mxc/devices/platform-spi_imx.c
5436
4130
/* * Copyright (C) 2009-2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_spi_imx_data_entry_single(soc, type, _devid, _id, hwid, _size) \ { \ .devid = _devid, \ .id = _id, \ .iobase = soc ## _ ## type ## hwid ## _BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_ ## type ## hwid, \ } #define imx_spi_imx_data_entry(soc, type, devid, id, hwid, size) \ [id] = imx_spi_imx_data_entry_single(soc, type, devid, id, hwid, size) #ifdef CONFIG_SOC_IMX1 const struct imx_spi_imx_data imx1_cspi_data[] __initconst = { #define imx1_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX1, CSPI, "imx1-cspi", _id, _hwid, SZ_4K) imx1_cspi_data_entry(0, 1), imx1_cspi_data_entry(1, 2), }; #endif #ifdef CONFIG_SOC_IMX21 const struct imx_spi_imx_data imx21_cspi_data[] __initconst = { #define imx21_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K) imx21_cspi_data_entry(0, 1), imx21_cspi_data_entry(1, 2), }; #endif #ifdef CONFIG_SOC_IMX25 /* i.mx25 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx25_cspi_data[] __initconst = { #define imx25_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K) imx25_cspi_data_entry(0, 1), imx25_cspi_data_entry(1, 2), imx25_cspi_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_spi_imx_data imx27_cspi_data[] __initconst = { #define imx27_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX27, CSPI, "imx27-cspi", _id, _hwid, SZ_4K) imx27_cspi_data_entry(0, 1), imx27_cspi_data_entry(1, 2), imx27_cspi_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX31 const struct imx_spi_imx_data imx31_cspi_data[] 
__initconst = { #define imx31_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX31, CSPI, "imx31-cspi", _id, _hwid, SZ_4K) imx31_cspi_data_entry(0, 1), imx31_cspi_data_entry(1, 2), imx31_cspi_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX31 */ #ifdef CONFIG_SOC_IMX35 const struct imx_spi_imx_data imx35_cspi_data[] __initconst = { #define imx35_cspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX35, CSPI, "imx35-cspi", _id, _hwid, SZ_4K) imx35_cspi_data_entry(0, 1), imx35_cspi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX35 */ #ifdef CONFIG_SOC_IMX51 /* i.mx51 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx51_cspi_data __initconst = imx_spi_imx_data_entry_single(MX51, CSPI, "imx35-cspi", 2, , SZ_4K); const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { #define imx51_ecspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX51, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K) imx51_ecspi_data_entry(0, 1), imx51_ecspi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX51 */ #ifdef CONFIG_SOC_IMX53 /* i.mx53 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx53_cspi_data __initconst = imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K); /* i.mx53 has the i.mx51 type ecspi */ const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = { #define imx53_ecspi_data_entry(_id, _hwid) \ imx_spi_imx_data_entry(MX53, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K) imx53_ecspi_data_entry(0, 1), imx53_ecspi_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX53 */ struct platform_device *__init imx_add_spi_imx( const struct imx_spi_imx_data *data, const struct spi_imx_master *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device(data->devid, data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); }
gpl-2.0
RomaVis/eeenote-kernel
net/ceph/auth_x.c
6460
16665
#include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include "crypto.h" #include "auth_x.h" #include "auth_x_protocol.h" #define TEMP_TICKET_BUF_LEN 256 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); static int ceph_x_is_authenticated(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_is_authenticated want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return (ac->want_keys & xi->have_keys) == ac->want_keys; } static int ceph_x_should_authenticate(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_should_authenticate want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return need != 0; } static int ceph_x_encrypt_buflen(int ilen) { return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + sizeof(u32); } static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *ibuf, int ilen, void *obuf, size_t olen) { struct ceph_x_encrypt_header head = { .struct_v = 1, .magic = cpu_to_le64(CEPHX_ENC_MAGIC) }; size_t len = olen - sizeof(u32); int ret; ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len, &head, sizeof(head), ibuf, ilen); if (ret) return ret; ceph_encode_32(&obuf, len); return len + sizeof(u32); } static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end, void *obuf, size_t olen) { struct ceph_x_encrypt_header head; size_t head_len = sizeof(head); int len, ret; len = ceph_decode_32(p); if (*p + len > end) return -EINVAL; dout("ceph_x_decrypt len %d\n", len); ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, *p, len); if (ret) return ret; if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) return -EPERM; *p += len; return olen; } /* * get existing (or insert new) 
ticket handler */ static struct ceph_x_ticket_handler * get_ticket_handler(struct ceph_auth_client *ac, int service) { struct ceph_x_ticket_handler *th; struct ceph_x_info *xi = ac->private; struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node; while (*p) { parent = *p; th = rb_entry(parent, struct ceph_x_ticket_handler, node); if (service < th->service) p = &(*p)->rb_left; else if (service > th->service) p = &(*p)->rb_right; else return th; } /* add it */ th = kzalloc(sizeof(*th), GFP_NOFS); if (!th) return ERR_PTR(-ENOMEM); th->service = service; rb_link_node(&th->node, parent, p); rb_insert_color(&th->node, &xi->ticket_handlers); return th; } static void remove_ticket_handler(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th) { struct ceph_x_info *xi = ac->private; dout("remove_ticket_handler %p %d\n", th, th->service); rb_erase(&th->node, &xi->ticket_handlers); ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); kfree(th); } static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void *buf, void *end) { struct ceph_x_info *xi = ac->private; int num; void *p = buf; int ret; char *dbuf; char *ticket_buf; u8 reply_struct_v; dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); if (!dbuf) return -ENOMEM; ret = -ENOMEM; ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); if (!ticket_buf) goto out_dbuf; ceph_decode_need(&p, end, 1 + sizeof(u32), bad); reply_struct_v = ceph_decode_8(&p); if (reply_struct_v != 1) goto bad; num = ceph_decode_32(&p); dout("%d tickets\n", num); while (num--) { int type; u8 tkt_struct_v, blob_struct_v; struct ceph_x_ticket_handler *th; void *dp, *dend; int dlen; char is_enc; struct timespec validity; struct ceph_crypto_key old_key; void *tp, *tpend; struct ceph_timespec new_validity; struct ceph_crypto_key new_session_key; struct ceph_buffer *new_ticket_blob; unsigned long new_expires, new_renew_after; u64 new_secret_id; 
ceph_decode_need(&p, end, sizeof(u32) + 1, bad); type = ceph_decode_32(&p); dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); tkt_struct_v = ceph_decode_8(&p); if (tkt_struct_v != 1) goto bad; th = get_ticket_handler(ac, type); if (IS_ERR(th)) { ret = PTR_ERR(th); goto out; } /* blob for me */ dlen = ceph_x_decrypt(secret, &p, end, dbuf, TEMP_TICKET_BUF_LEN); if (dlen <= 0) { ret = dlen; goto out; } dout(" decrypted %d bytes\n", dlen); dend = dbuf + dlen; dp = dbuf; tkt_struct_v = ceph_decode_8(&dp); if (tkt_struct_v != 1) goto bad; memcpy(&old_key, &th->session_key, sizeof(old_key)); ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); if (ret) goto out; ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); ceph_decode_timespec(&validity, &new_validity); new_expires = get_seconds() + validity.tv_sec; new_renew_after = new_expires - (validity.tv_sec / 4); dout(" expires=%lu renew_after=%lu\n", new_expires, new_renew_after); /* ticket blob for service */ ceph_decode_8_safe(&p, end, is_enc, bad); tp = ticket_buf; if (is_enc) { /* encrypted */ dout(" encrypted ticket\n"); dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf, TEMP_TICKET_BUF_LEN); if (dlen < 0) { ret = dlen; goto out; } dlen = ceph_decode_32(&tp); } else { /* unencrypted */ ceph_decode_32_safe(&p, end, dlen, bad); ceph_decode_need(&p, end, dlen, bad); ceph_decode_copy(&p, ticket_buf, dlen); } tpend = tp + dlen; dout(" ticket blob is %d bytes\n", dlen); ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); blob_struct_v = ceph_decode_8(&tp); new_secret_id = ceph_decode_64(&tp); ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); if (ret) goto out; /* all is well, update our ticket */ ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); th->session_key = new_session_key; th->ticket_blob = new_ticket_blob; th->validity = new_validity; th->secret_id = new_secret_id; th->expires = new_expires; th->renew_after = new_renew_after; dout(" 
got ticket service %d (%s) secret_id %lld len %d\n", type, ceph_entity_type_name(type), th->secret_id, (int)th->ticket_blob->vec.iov_len); xi->have_keys |= th->service; } ret = 0; out: kfree(ticket_buf); out_dbuf: kfree(dbuf); return ret; bad: ret = -EINVAL; goto out; } static int ceph_x_build_authorizer(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th, struct ceph_x_authorizer *au) { int maxlen; struct ceph_x_authorize_a *msg_a; struct ceph_x_authorize_b msg_b; void *p, *end; int ret; int ticket_blob_len = (th->ticket_blob ? th->ticket_blob->vec.iov_len : 0); dout("build_authorizer for %s %p\n", ceph_entity_type_name(th->service), au); maxlen = sizeof(*msg_a) + sizeof(msg_b) + ceph_x_encrypt_buflen(ticket_blob_len); dout(" need len %d\n", maxlen); if (au->buf && au->buf->alloc_len < maxlen) { ceph_buffer_put(au->buf); au->buf = NULL; } if (!au->buf) { au->buf = ceph_buffer_new(maxlen, GFP_NOFS); if (!au->buf) return -ENOMEM; } au->service = th->service; msg_a = au->buf->vec.iov_base; msg_a->struct_v = 1; msg_a->global_id = cpu_to_le64(ac->global_id); msg_a->service_id = cpu_to_le32(th->service); msg_a->ticket_blob.struct_v = 1; msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id); msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len); if (ticket_blob_len) { memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base, th->ticket_blob->vec.iov_len); } dout(" th %p secret_id %lld %lld\n", th, th->secret_id, le64_to_cpu(msg_a->ticket_blob.secret_id)); p = msg_a + 1; p += ticket_blob_len; end = au->buf->vec.iov_base + au->buf->vec.iov_len; get_random_bytes(&au->nonce, sizeof(au->nonce)); msg_b.struct_v = 1; msg_b.nonce = cpu_to_le64(au->nonce); ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b), p, end - p); if (ret < 0) goto out_buf; p += ret; au->buf->vec.iov_len = p - au->buf->vec.iov_base; dout(" built authorizer nonce %llx len %d\n", au->nonce, (int)au->buf->vec.iov_len); BUG_ON(au->buf->vec.iov_len > maxlen); return 0; out_buf: 
ceph_buffer_put(au->buf); au->buf = NULL; return ret; } static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th, void **p, void *end) { ceph_decode_need(p, end, 1 + sizeof(u64), bad); ceph_encode_8(p, 1); ceph_encode_64(p, th->secret_id); if (th->ticket_blob) { const char *buf = th->ticket_blob->vec.iov_base; u32 len = th->ticket_blob->vec.iov_len; ceph_encode_32_safe(p, end, len, bad); ceph_encode_copy_safe(p, end, buf, len, bad); } else { ceph_encode_32_safe(p, end, 0, bad); } return 0; bad: return -ERANGE; } static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed) { int want = ac->want_keys; struct ceph_x_info *xi = ac->private; int service; *pneed = ac->want_keys & ~(xi->have_keys); for (service = 1; service <= want; service <<= 1) { struct ceph_x_ticket_handler *th; if (!(ac->want_keys & service)) continue; if (*pneed & service) continue; th = get_ticket_handler(ac, service); if (IS_ERR(th)) { *pneed |= service; continue; } if (get_seconds() >= th->renew_after) *pneed |= service; if (get_seconds() >= th->expires) xi->have_keys &= ~service; } } static int ceph_x_build_request(struct ceph_auth_client *ac, void *buf, void *end) { struct ceph_x_info *xi = ac->private; int need; struct ceph_x_request_header *head = buf; int ret; struct ceph_x_ticket_handler *th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ceph_x_validate_tickets(ac, &need); dout("build_request want %x have %x need %x\n", ac->want_keys, xi->have_keys, need); if (need & CEPH_ENTITY_TYPE_AUTH) { struct ceph_x_authenticate *auth = (void *)(head + 1); void *p = auth + 1; struct ceph_x_challenge_blob tmp; char tmp_enc[40]; u64 *u; if (p > end) return -ERANGE; dout(" get_auth_session_key\n"); head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY); /* encrypt and hash */ get_random_bytes(&auth->client_challenge, sizeof(u64)); tmp.client_challenge = auth->client_challenge; tmp.server_challenge = cpu_to_le64(xi->server_challenge); ret = 
ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp), tmp_enc, sizeof(tmp_enc)); if (ret < 0) return ret; auth->struct_v = 1; auth->key = 0; for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++) auth->key ^= *(__le64 *)u; dout(" server_challenge %llx client_challenge %llx key %llx\n", xi->server_challenge, le64_to_cpu(auth->client_challenge), le64_to_cpu(auth->key)); /* now encode the old ticket if exists */ ret = ceph_x_encode_ticket(th, &p, end); if (ret < 0) return ret; return p - buf; } if (need) { void *p = head + 1; struct ceph_x_service_ticket_request *req; if (p > end) return -ERANGE; head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY); ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer); if (ret) return ret; ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base, xi->auth_authorizer.buf->vec.iov_len); req = p; req->keys = cpu_to_le32(need); p += sizeof(*req); return p - buf; } return 0; } static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end) { struct ceph_x_info *xi = ac->private; struct ceph_x_reply_header *head = buf; struct ceph_x_ticket_handler *th; int len = end - buf; int op; int ret; if (result) return result; /* XXX hmm? 
*/ if (xi->starting) { /* it's a hello */ struct ceph_x_server_challenge *sc = buf; if (len != sizeof(*sc)) return -EINVAL; xi->server_challenge = le64_to_cpu(sc->server_challenge); dout("handle_reply got server challenge %llx\n", xi->server_challenge); xi->starting = false; xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH; return -EAGAIN; } op = le16_to_cpu(head->op); result = le32_to_cpu(head->result); dout("handle_reply op %d result %d\n", op, result); switch (op) { case CEPHX_GET_AUTH_SESSION_KEY: /* verify auth key */ ret = ceph_x_proc_ticket_reply(ac, &xi->secret, buf + sizeof(*head), end); break; case CEPHX_GET_PRINCIPAL_SESSION_KEY: th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_proc_ticket_reply(ac, &th->session_key, buf + sizeof(*head), end); break; default: return -EINVAL; } if (ret) return ret; if (ac->want_keys == xi->have_keys) return 0; return -EAGAIN; } static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_authorizer **a, void **buf, size_t *len, void **reply_buf, size_t *reply_len) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; int ret; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = kzalloc(sizeof(*au), GFP_NOFS); if (!au) return -ENOMEM; ret = ceph_x_build_authorizer(ac, th, au); if (ret) { kfree(au); return ret; } *a = (struct ceph_authorizer *)au; *buf = au->buf->vec.iov_base; *len = au->buf->vec.iov_len; *reply_buf = au->reply_buf; *reply_len = sizeof(au->reply_buf); return 0; } static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { struct ceph_x_authorizer *au = (void *)a; struct ceph_x_ticket_handler *th; int ret = 0; struct ceph_x_authorize_reply reply; void *p = au->reply_buf; void *end = p + sizeof(au->reply_buf); th = get_ticket_handler(ac, au->service); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, 
sizeof(reply)); if (ret < 0) return ret; if (ret != sizeof(reply)) return -EPERM; if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) ret = -EPERM; else ret = 0; dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); return ret; } static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a) { struct ceph_x_authorizer *au = (void *)a; ceph_buffer_put(au->buf); kfree(au); } static void ceph_x_reset(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; dout("reset\n"); xi->starting = true; xi->server_challenge = 0; } static void ceph_x_destroy(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; struct rb_node *p; dout("ceph_x_destroy %p\n", ac); ceph_crypto_key_destroy(&xi->secret); while ((p = rb_first(&xi->ticket_handlers)) != NULL) { struct ceph_x_ticket_handler *th = rb_entry(p, struct ceph_x_ticket_handler, node); remove_ticket_handler(ac, th); } if (xi->auth_authorizer.buf) ceph_buffer_put(xi->auth_authorizer.buf); kfree(ac->private); ac->private = NULL; } static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) { struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (!IS_ERR(th)) remove_ticket_handler(ac, th); } static const struct ceph_auth_client_ops ceph_x_ops = { .name = "x", .is_authenticated = ceph_x_is_authenticated, .should_authenticate = ceph_x_should_authenticate, .build_request = ceph_x_build_request, .handle_reply = ceph_x_handle_reply, .create_authorizer = ceph_x_create_authorizer, .verify_authorizer_reply = ceph_x_verify_authorizer_reply, .destroy_authorizer = ceph_x_destroy_authorizer, .invalidate_authorizer = ceph_x_invalidate_authorizer, .reset = ceph_x_reset, .destroy = ceph_x_destroy, }; int ceph_x_init(struct ceph_auth_client *ac) { struct ceph_x_info *xi; int ret; dout("ceph_x_init %p\n", ac); ret = -ENOMEM; xi = kzalloc(sizeof(*xi), GFP_NOFS); if (!xi) goto out; 
ret = -EINVAL; if (!ac->key) { pr_err("no secret set (for auth_x protocol)\n"); goto out_nomem; } ret = ceph_crypto_key_clone(&xi->secret, ac->key); if (ret < 0) { pr_err("cannot clone key: %d\n", ret); goto out_nomem; } xi->starting = true; xi->ticket_handlers = RB_ROOT; ac->protocol = CEPH_AUTH_CEPHX; ac->private = xi; ac->ops = &ceph_x_ops; return 0; out_nomem: kfree(xi); out: return ret; }
gpl-2.0
sebirdman/kernel_m7
arch/mips/pci/ops-bcm63xx.c
9020
11120
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/io.h> #include "pci-bcm63xx.h" /* * swizzle 32bits data to return only the needed part */ static int postprocess_read(u32 data, int where, unsigned int size) { u32 ret; ret = 0; switch (size) { case 1: ret = (data >> ((where & 3) << 3)) & 0xff; break; case 2: ret = (data >> ((where & 3) << 3)) & 0xffff; break; case 4: ret = data; break; } return ret; } static int preprocess_write(u32 orig_data, u32 val, int where, unsigned int size) { u32 ret; ret = 0; switch (size) { case 1: ret = (orig_data & ~(0xff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); break; case 2: ret = (orig_data & ~(0xffff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); break; case 4: ret = val; break; } return ret; } /* * setup hardware for a configuration cycle with given parameters */ static int bcm63xx_setup_cfg_access(int type, unsigned int busn, unsigned int devfn, int where) { unsigned int slot, func, reg; u32 val; slot = PCI_SLOT(devfn); func = PCI_FUNC(devfn); reg = where >> 2; /* sanity check */ if (slot > (MPI_L2PCFG_DEVNUM_MASK >> MPI_L2PCFG_DEVNUM_SHIFT)) return 1; if (func > (MPI_L2PCFG_FUNC_MASK >> MPI_L2PCFG_FUNC_SHIFT)) return 1; if (reg > (MPI_L2PCFG_REG_MASK >> MPI_L2PCFG_REG_SHIFT)) return 1; /* ok, setup config access */ val = (reg << MPI_L2PCFG_REG_SHIFT); val |= (func << MPI_L2PCFG_FUNC_SHIFT); val |= (slot << MPI_L2PCFG_DEVNUM_SHIFT); val |= MPI_L2PCFG_CFG_USEREG_MASK; val |= MPI_L2PCFG_CFG_SEL_MASK; /* type 0 cycle for local bus, type 1 cycle for anything else */ if (type != 0) { /* FIXME: how to specify bus ??? 
*/ val |= (1 << MPI_L2PCFG_CFG_TYPE_SHIFT); } bcm_mpi_writel(val, MPI_L2PCFG_REG); return 0; } static int bcm63xx_do_cfg_read(int type, unsigned int busn, unsigned int devfn, int where, int size, u32 *val) { u32 data; /* two phase cycle, first we write address, then read data at * another location, caller already has a spinlock so no need * to add one here */ if (bcm63xx_setup_cfg_access(type, busn, devfn, where)) return PCIBIOS_DEVICE_NOT_FOUND; iob(); data = le32_to_cpu(__raw_readl(pci_iospace_start)); /* restore IO space normal behaviour */ bcm_mpi_writel(0, MPI_L2PCFG_REG); *val = postprocess_read(data, where, size); return PCIBIOS_SUCCESSFUL; } static int bcm63xx_do_cfg_write(int type, unsigned int busn, unsigned int devfn, int where, int size, u32 val) { u32 data; /* two phase cycle, first we write address, then write data to * another location, caller already has a spinlock so no need * to add one here */ if (bcm63xx_setup_cfg_access(type, busn, devfn, where)) return PCIBIOS_DEVICE_NOT_FOUND; iob(); data = le32_to_cpu(__raw_readl(pci_iospace_start)); data = preprocess_write(data, val, where, size); __raw_writel(cpu_to_le32(data), pci_iospace_start); wmb(); /* no way to know the access is done, we have to wait */ udelay(500); /* restore IO space normal behaviour */ bcm_mpi_writel(0, MPI_L2PCFG_REG); return PCIBIOS_SUCCESSFUL; } static int bcm63xx_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { int type; type = bus->parent ? 1 : 0; if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL) return PCIBIOS_DEVICE_NOT_FOUND; return bcm63xx_do_cfg_read(type, bus->number, devfn, where, size, val); } static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { int type; type = bus->parent ? 
1 : 0; if (type == 0 && PCI_SLOT(devfn) == CARDBUS_PCI_IDSEL) return PCIBIOS_DEVICE_NOT_FOUND; return bcm63xx_do_cfg_write(type, bus->number, devfn, where, size, val); } struct pci_ops bcm63xx_pci_ops = { .read = bcm63xx_pci_read, .write = bcm63xx_pci_write }; #ifdef CONFIG_CARDBUS /* * emulate configuration read access on a cardbus bridge */ #define FAKE_CB_BRIDGE_SLOT 0x1e static int fake_cb_bridge_bus_number = -1; static struct { u16 pci_command; u8 cb_latency; u8 subordinate_busn; u8 cardbus_busn; u8 pci_busn; int bus_assigned; u16 bridge_control; u32 mem_base0; u32 mem_limit0; u32 mem_base1; u32 mem_limit1; u32 io_base0; u32 io_limit0; u32 io_base1; u32 io_limit1; } fake_cb_bridge_regs; static int fake_cb_bridge_read(int where, int size, u32 *val) { unsigned int reg; u32 data; data = 0; reg = where >> 2; switch (reg) { case (PCI_VENDOR_ID >> 2): case (PCI_CB_SUBSYSTEM_VENDOR_ID >> 2): /* create dummy vendor/device id from our cpu id */ data = (bcm63xx_get_cpu_id() << 16) | PCI_VENDOR_ID_BROADCOM; break; case (PCI_COMMAND >> 2): data = (PCI_STATUS_DEVSEL_SLOW << 16); data |= fake_cb_bridge_regs.pci_command; break; case (PCI_CLASS_REVISION >> 2): data = (PCI_CLASS_BRIDGE_CARDBUS << 16); break; case (PCI_CACHE_LINE_SIZE >> 2): data = (PCI_HEADER_TYPE_CARDBUS << 16); break; case (PCI_INTERRUPT_LINE >> 2): /* bridge control */ data = (fake_cb_bridge_regs.bridge_control << 16); /* pin:intA line:0xff */ data |= (0x1 << 8) | 0xff; break; case (PCI_CB_PRIMARY_BUS >> 2): data = (fake_cb_bridge_regs.cb_latency << 24); data |= (fake_cb_bridge_regs.subordinate_busn << 16); data |= (fake_cb_bridge_regs.cardbus_busn << 8); data |= fake_cb_bridge_regs.pci_busn; break; case (PCI_CB_MEMORY_BASE_0 >> 2): data = fake_cb_bridge_regs.mem_base0; break; case (PCI_CB_MEMORY_LIMIT_0 >> 2): data = fake_cb_bridge_regs.mem_limit0; break; case (PCI_CB_MEMORY_BASE_1 >> 2): data = fake_cb_bridge_regs.mem_base1; break; case (PCI_CB_MEMORY_LIMIT_1 >> 2): data = fake_cb_bridge_regs.mem_limit1; 
break; case (PCI_CB_IO_BASE_0 >> 2): /* | 1 for 32bits io support */ data = fake_cb_bridge_regs.io_base0 | 0x1; break; case (PCI_CB_IO_LIMIT_0 >> 2): data = fake_cb_bridge_regs.io_limit0; break; case (PCI_CB_IO_BASE_1 >> 2): /* | 1 for 32bits io support */ data = fake_cb_bridge_regs.io_base1 | 0x1; break; case (PCI_CB_IO_LIMIT_1 >> 2): data = fake_cb_bridge_regs.io_limit1; break; } *val = postprocess_read(data, where, size); return PCIBIOS_SUCCESSFUL; } /* * emulate configuration write access on a cardbus bridge */ static int fake_cb_bridge_write(int where, int size, u32 val) { unsigned int reg; u32 data, tmp; int ret; ret = fake_cb_bridge_read((where & ~0x3), 4, &data); if (ret != PCIBIOS_SUCCESSFUL) return ret; data = preprocess_write(data, val, where, size); reg = where >> 2; switch (reg) { case (PCI_COMMAND >> 2): fake_cb_bridge_regs.pci_command = (data & 0xffff); break; case (PCI_CB_PRIMARY_BUS >> 2): fake_cb_bridge_regs.cb_latency = (data >> 24) & 0xff; fake_cb_bridge_regs.subordinate_busn = (data >> 16) & 0xff; fake_cb_bridge_regs.cardbus_busn = (data >> 8) & 0xff; fake_cb_bridge_regs.pci_busn = data & 0xff; if (fake_cb_bridge_regs.cardbus_busn) fake_cb_bridge_regs.bus_assigned = 1; break; case (PCI_INTERRUPT_LINE >> 2): tmp = (data >> 16) & 0xffff; /* disable memory prefetch support */ tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; tmp &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; fake_cb_bridge_regs.bridge_control = tmp; break; case (PCI_CB_MEMORY_BASE_0 >> 2): fake_cb_bridge_regs.mem_base0 = data; break; case (PCI_CB_MEMORY_LIMIT_0 >> 2): fake_cb_bridge_regs.mem_limit0 = data; break; case (PCI_CB_MEMORY_BASE_1 >> 2): fake_cb_bridge_regs.mem_base1 = data; break; case (PCI_CB_MEMORY_LIMIT_1 >> 2): fake_cb_bridge_regs.mem_limit1 = data; break; case (PCI_CB_IO_BASE_0 >> 2): fake_cb_bridge_regs.io_base0 = data; break; case (PCI_CB_IO_LIMIT_0 >> 2): fake_cb_bridge_regs.io_limit0 = data; break; case (PCI_CB_IO_BASE_1 >> 2): fake_cb_bridge_regs.io_base1 = data; break; case 
(PCI_CB_IO_LIMIT_1 >> 2): fake_cb_bridge_regs.io_limit1 = data; break; } return PCIBIOS_SUCCESSFUL; } static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { /* snoop access to slot 0x1e on root bus, we fake a cardbus * bridge at this location */ if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) { fake_cb_bridge_bus_number = bus->number; return fake_cb_bridge_read(where, size, val); } /* a configuration cycle for the device behind the cardbus * bridge is actually done as a type 0 cycle on the primary * bus. This means that only one device can be on the cardbus * bus */ if (fake_cb_bridge_regs.bus_assigned && bus->number == fake_cb_bridge_regs.cardbus_busn && PCI_SLOT(devfn) == 0) return bcm63xx_do_cfg_read(0, 0, PCI_DEVFN(CARDBUS_PCI_IDSEL, 0), where, size, val); return PCIBIOS_DEVICE_NOT_FOUND; } static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { if (!bus->parent && PCI_SLOT(devfn) == FAKE_CB_BRIDGE_SLOT) { fake_cb_bridge_bus_number = bus->number; return fake_cb_bridge_write(where, size, val); } if (fake_cb_bridge_regs.bus_assigned && bus->number == fake_cb_bridge_regs.cardbus_busn && PCI_SLOT(devfn) == 0) return bcm63xx_do_cfg_write(0, 0, PCI_DEVFN(CARDBUS_PCI_IDSEL, 0), where, size, val); return PCIBIOS_DEVICE_NOT_FOUND; } struct pci_ops bcm63xx_cb_ops = { .read = bcm63xx_cb_read, .write = bcm63xx_cb_write, }; /* * only one IO window, so it cannot be shared by PCI and cardbus, use * fixup to choose and detect unhandled configuration */ static void bcm63xx_fixup(struct pci_dev *dev) { static int io_window = -1; int i, found, new_io_window; u32 val; /* look for any io resource */ found = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if (pci_resource_flags(dev, i) & IORESOURCE_IO) { found = 1; break; } } if (!found) return; /* skip our fake bus with only cardbus bridge on it */ if (dev->bus->number == fake_cb_bridge_bus_number) return; /* find on which bus 
the device is */ if (fake_cb_bridge_regs.bus_assigned && dev->bus->number == fake_cb_bridge_regs.cardbus_busn && PCI_SLOT(dev->devfn) == 0) new_io_window = 1; else new_io_window = 0; if (new_io_window == io_window) return; if (io_window != -1) { printk(KERN_ERR "bcm63xx: both PCI and cardbus devices " "need IO, which hardware cannot do\n"); return; } printk(KERN_INFO "bcm63xx: PCI IO window assigned to %s\n", (new_io_window == 0) ? "PCI" : "cardbus"); val = bcm_mpi_readl(MPI_L2PIOREMAP_REG); if (io_window) val |= MPI_L2PREMAP_IS_CARDBUS_MASK; else val &= ~MPI_L2PREMAP_IS_CARDBUS_MASK; bcm_mpi_writel(val, MPI_L2PIOREMAP_REG); io_window = new_io_window; } DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup); #endif
gpl-2.0
OptimusG-Dev-Team/proj-kernel
arch/alpha/kernel/sys_eb64p.c
9020
5747
/* * linux/arch/alpha/kernel/sys_eb64p.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the EB64+ and EB66. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/core_apecs.h> #include <asm/core_lca.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned int cached_irq_mask = -1; static inline void eb64p_update_irq_hw(unsigned int irq, unsigned long mask) { outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26)); } static inline void eb64p_enable_irq(struct irq_data *d) { eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); } static void eb64p_disable_irq(struct irq_data *d) { eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq); } static struct irq_chip eb64p_irq_type = { .name = "EB64P", .irq_unmask = eb64p_enable_irq, .irq_mask = eb64p_disable_irq, .irq_mask_ack = eb64p_disable_irq, }; static void eb64p_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary registers */ pld = inb(0x26) | (inb(0x27) << 8); /* * Now, for every possible bit set, work through * them and call the appropriate interrupt handler. 
*/ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 5) { isa_device_interrupt(vector); } else { handle_irq(16 + i); } } } static void __init eb64p_init_irq(void) { long i; #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET) /* * CABRIO SRM may not set variation correctly, so here we test * the high word of the interrupt summary register for the RAZ * bits, and hope that a true EB64+ would read all ones... */ if (inw(0x806) != 0xffff) { extern struct alpha_machine_vector cabriolet_mv; printk("Detected Cabriolet: correcting HWRPB.\n"); hwrpb->sys_variation |= 2L << 10; hwrpb_update_checksum(hwrpb); alpha_mv = cabriolet_mv; alpha_mv.init_irq(); return; } #endif /* GENERIC */ outb(0xff, 0x26); outb(0xff, 0x27); init_i8259a_irqs(); for (i = 16; i < 32; ++i) { irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); setup_irq(16+5, &isa_cascade_irqaction); } /* * PCI Fixup configuration. * * There are two 8 bit external summary registers as follows: * * Summary @ 0x26: * Bit Meaning * 0 Interrupt Line A from slot 0 * 1 Interrupt Line A from slot 1 * 2 Interrupt Line B from slot 0 * 3 Interrupt Line B from slot 1 * 4 Interrupt Line C from slot 0 * 5 Interrupt line from the two ISA PICs * 6 Tulip * 7 NCR SCSI * * Summary @ 0x27 * Bit Meaning * 0 Interrupt Line C from slot 1 * 1 Interrupt Line D from slot 0 * 2 Interrupt Line D from slot 1 * 3 RAZ * 4 RAZ * 5 RAZ * 6 RAZ * 7 RAZ * * The device to slot mapping looks like: * * Slot Device * 5 NCR SCSI controller * 6 PCI on board slot 0 * 7 PCI on board slot 1 * 8 Intel SIO PCI-ISA bridge chip * 9 Tulip - DECchip 21040 Ethernet controller * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. 
*/ static int __init eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] __initdata = { /*INT INTA INTB INTC INTD */ {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ {16+1, 16+1, 16+3, 16+8, 16+10}, /* IdSel 7, slot ?, ?? */ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ {16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 9, TULIP */ }; const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } /* * The System Vector */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P) struct alpha_machine_vector eb64p_mv __initmv = { .vector_name = "EB64+", DO_EV4_MMU, DO_DEFAULT_RTC, DO_APECS_IO, .machine_check = apecs_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 32, .device_interrupt = eb64p_device_interrupt, .init_arch = apecs_init_arch, .init_irq = eb64p_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = NULL, .pci_map_irq = eb64p_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(eb64p) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66) struct alpha_machine_vector eb66_mv __initmv = { .vector_name = "EB66", DO_EV4_MMU, DO_DEFAULT_RTC, DO_LCA_IO, .machine_check = lca_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 32, .device_interrupt = eb64p_device_interrupt, .init_arch = lca_init_arch, .init_irq = eb64p_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .pci_map_irq = eb64p_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(eb66) #endif
gpl-2.0
anjali009/linux
arch/alpha/kernel/sys_noritake.c
9020
9186
/*
 *	linux/arch/alpha/kernel/sys_noritake.c
 *
 *	Copyright (C) 1995 David A Rusling
 *	Copyright (C) 1996 Jay A Estabrook
 *	Copyright (C) 1998, 1999 Richard Henderson
 *
 * Code supporting the NORITAKE (AlphaServer 1000A),
 * CORELLE (AlphaServer 800), and ALCOR Primo (AlphaStation 600A).
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/mce.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"

/* Note mask bit is true for ENABLED irqs. */
static int cached_irq_mask;

/*
 * Write the cached enable mask for @irq's bank: port 0x54a carries
 * irqs 16-31, port 0x54c carries irqs 32-47 (upper half of the mask).
 */
static inline void
noritake_update_irq_hw(int irq, int mask)
{
	int port = 0x54a;
	if (irq >= 32) {
		mask >>= 16;
		port = 0x54c;
	}
	outw(mask, port);
}

/* Set the enable bit (mask is offset by the 16 i8259 irqs). */
static void
noritake_enable_irq(struct irq_data *d)
{
	noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
}

/* Clear the enable bit. */
static void
noritake_disable_irq(struct irq_data *d)
{
	noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
}

static struct irq_chip noritake_irq_type = {
	.name		= "NORITAKE",
	.irq_unmask	= noritake_enable_irq,
	.irq_mask	= noritake_disable_irq,
	.irq_mask_ack	= noritake_disable_irq,
};

/*
 * Non-SRM dispatcher: build one 48-bit pending word from the two
 * 16-bit summary registers plus the two i8259 status ports, then
 * dispatch each set bit.  Bits 0-15 cascade to the ISA path.
 */
static void
noritake_device_interrupt(unsigned long vector)
{
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary registers of NORITAKE */
	pld = (((unsigned long) inw(0x54c) << 32)
	       | ((unsigned long) inw(0x54a) << 16)
	       | ((unsigned long) inb(0xa0) << 8)
	       | inb(0x20));

	/*
	 * Now for every possible bit set, work through them and call
	 * the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);
		pld &= pld - 1; /* clear least bit set */
		if (i < 16) {
			isa_device_interrupt(vector);
		} else {
			handle_irq(i);
		}
	}
}

/*
 * SRM dispatcher: decode the irq number from the SRM vector and
 * compensate for SRM's off-by-one PCI vector numbering (see below).
 */
static void
noritake_srm_device_interrupt(unsigned long vector)
{
	int irq;

	irq = (vector - 0x800) >> 4;

	/*
	 * I really hate to do this, too, but the NORITAKE SRM console also
	 * reports PCI vectors *lower* than I expected from the bit numbers
	 * in the documentation.
	 * But I really don't want to change the fixup code for allocation
	 * of IRQs, nor the alpha_irq_mask maintenance stuff, both of which
	 * look nice and clean now.
	 * So, here's this additional grotty hack... :-(
	 */
	if (irq >= 16)
		irq = irq + 1;

	handle_irq(irq);
}

/*
 * Disable both summary banks, register irqs 16-47 as level-triggered,
 * and bring up the legacy i8259 pair and ISA DMA.
 */
static void __init
noritake_init_irq(void)
{
	long i;

	if (alpha_using_srm)
		alpha_mv.device_interrupt = noritake_srm_device_interrupt;

	outw(0, 0x54a);
	outw(0, 0x54c);

	for (i = 16; i < 48; ++i) {
		irq_set_chip_and_handler(i, &noritake_irq_type,
					 handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}

	init_i8259a_irqs();
	common_init_isa_dma();
}

/*
 * PCI Fixup configuration.
 *
 * Summary @ 0x542, summary register #1:
 * Bit      Meaning
 * 0        All valid ints from summary regs 2 & 3
 * 1        QLOGIC ISP1020A SCSI
 * 2        Interrupt Line A from slot 0
 * 3        Interrupt Line B from slot 0
 * 4        Interrupt Line A from slot 1
 * 5        Interrupt line B from slot 1
 * 6        Interrupt Line A from slot 2
 * 7        Interrupt Line B from slot 2
 * 8        Interrupt Line A from slot 3
 * 9        Interrupt Line B from slot 3
 *10        Interrupt Line A from slot 4
 *11        Interrupt Line B from slot 4
 *12        Interrupt Line A from slot 5
 *13        Interrupt Line B from slot 5
 *14        Interrupt Line A from slot 6
 *15        Interrupt Line B from slot 6
 *
 * Summary @ 0x544, summary register #2:
 * Bit      Meaning
 * 0        OR of all unmasked ints in SR #2
 * 1        OR of secondary bus ints
 * 2        Interrupt Line C from slot 0
 * 3        Interrupt Line D from slot 0
 * 4        Interrupt Line C from slot 1
 * 5        Interrupt line D from slot 1
 * 6        Interrupt Line C from slot 2
 * 7        Interrupt Line D from slot 2
 * 8        Interrupt Line C from slot 3
 * 9        Interrupt Line D from slot 3
 *10        Interrupt Line C from slot 4
 *11        Interrupt Line D from slot 4
 *12        Interrupt Line C from slot 5
 *13        Interrupt Line D from slot 5
 *14        Interrupt Line C from slot 6
 *15        Interrupt Line D from slot 6
 *
 * The device to slot mapping looks like:
 *
 * Slot     Device
 *  7       Intel PCI-EISA bridge chip
 *  8       DEC PCI-PCI bridge chip
 * 11       PCI on board slot 0
 * 12       PCI on board slot 1
 * 13       PCI on board slot 2
 *
 *
 * This two layered interrupt approach means that we allocate IRQ 16 and
 * above for PCI interrupts.  The IRQ relates to which bit the interrupt
 * comes in on.  This makes interrupt processing much easier.
 */

/* Map (IdSel, pin) to an irq; INTC/INTD live in the 32+ bank. */
static int __init
noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[15][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		/* note: IDSELs 16, 17, and 25 are CORELLE only */
		{ 16+1,  16+1,  16+1,  16+1,  16+1},  /* IdSel 16,  QLOGIC */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 17, S3 Trio64 */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 18,  PCEB */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 19,  PPB  */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 20,  ???? */
		{   -1,    -1,    -1,    -1,    -1},  /* IdSel 21,  ???? */
		{ 16+2,  16+2,  16+3,  32+2,  32+3},  /* IdSel 22,  slot 0 */
		{ 16+4,  16+4,  16+5,  32+4,  32+5},  /* IdSel 23,  slot 1 */
		{ 16+6,  16+6,  16+7,  32+6,  32+7},  /* IdSel 24,  slot 2 */
		{ 16+8,  16+8,  16+9,  32+8,  32+9},  /* IdSel 25,  slot 3 */
		/* The following 5 are actually on PCI bus 1, which is
		   across the built-in bridge of the NORITAKE only. */
		{ 16+1,  16+1,  16+1,  16+1,  16+1},  /* IdSel 16,  QLOGIC */
		{ 16+8,  16+8,  16+9,  32+8,  32+9},  /* IdSel 17,  slot 3 */
		{16+10, 16+10, 16+11, 32+10, 32+11},  /* IdSel 18,  slot 4 */
		{16+12, 16+12, 16+13, 32+12, 32+13},  /* IdSel 19,  slot 5 */
		{16+14, 16+14, 16+15, 32+14, 32+15},  /* IdSel 20,  slot 6 */
	};
	const long min_idsel = 5, max_idsel = 19, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}

/*
 * Swizzle for the built-in bridge: devices behind the bridge at slot 8
 * are folded into the second half of the irq table (slot + 15).
 */
static u8 __init
noritake_swizzle(struct pci_dev *dev, u8 *pinp)
{
	int slot, pin = *pinp;

	if (dev->bus->number == 0) {
		slot = PCI_SLOT(dev->devfn);
	}
	/* Check for the built-in bridge */
	else if (PCI_SLOT(dev->bus->self->devfn) == 8) {
		slot = PCI_SLOT(dev->devfn) + 15; /* WAG! */
	}
	else
	{
		/* Must be a card-based bridge. */
		do {
			if (PCI_SLOT(dev->bus->self->devfn) == 8) {
				slot = PCI_SLOT(dev->devfn) + 15;
				break;
			}
			pin = pci_swizzle_interrupt_pin(dev, pin);

			/* Move up the chain of bridges. */
			dev = dev->bus->self;
			/* Slot of the next bridge. */
			slot = PCI_SLOT(dev->devfn);
		} while (dev->bus->self);
	}
	*pinp = pin;
	return slot;
}

#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
/*
 * Machine-check handler for the APECS variants: drain and clear the
 * error state *before* reporting (order of the mb/draina/clr_err
 * sequence is significant), then classify the error code.
 */
static void
noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr)
{
#define MCHK_NO_DEVSEL 0x205U
#define MCHK_NO_TABT 0x204U

	struct el_common *mchk_header;
	unsigned int code;

	mchk_header = (struct el_common *)la_ptr;

	/* Clear the error before any reporting. */
	mb();
	mb(); /* magic */
	draina();
	apecs_pci_clr_err();
	wrmces(0x7);
	mb();

	code = mchk_header->code;
	process_mcheck_info(vector, la_ptr, "NORITAKE APECS",
			    (mcheck_expected(0)
			     && (code == MCHK_NO_DEVSEL
				 || code == MCHK_NO_TABT)));
}
#endif

/*
 * The System Vectors
 */

#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO)
struct alpha_machine_vector noritake_mv __initmv = {
	.vector_name		= "Noritake",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= noritake_apecs_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= noritake_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= noritake_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.pci_map_irq		= noritake_map_irq,
	.pci_swizzle		= noritake_swizzle,
};
ALIAS_MV(noritake)
#endif

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO)
/* Primo variant: same irq machinery, but on a CIA core (EV5). */
struct alpha_machine_vector noritake_primo_mv __initmv = {
	.vector_name		= "Noritake-Primo",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= noritake_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= noritake_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= cia_init_pci,
	.kill_arch		= cia_kill_arch,
	.pci_map_irq		= noritake_map_irq,
	.pci_swizzle		= noritake_swizzle,
};
ALIAS_MV(noritake_primo)
#endif
gpl-2.0
aldanopolis/android_kernel_motorola_msm8226
arch/powerpc/sysdev/mmio_nvram.c
9532
3802
/*
 * memory mapped NVRAM
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/machdep.h>
#include <asm/nvram.h>
#include <asm/prom.h>

/* ioremap'ed base of the NVRAM window; set up by mmio_nvram_init() */
static void __iomem *mmio_nvram_start;
/* length of the window in bytes, from the device-tree resource */
static long mmio_nvram_len;
/* serializes all accesses to the NVRAM window */
static DEFINE_SPINLOCK(mmio_nvram_lock);

/*
 * Bulk read for ppc_md.nvram_read: copy up to @count bytes starting at
 * *@index into @buf, clamped to the window.  Advances *@index and
 * returns the number of bytes actually copied (0 past the end).
 */
static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
{
	unsigned long flags;

	if (*index >= mmio_nvram_len)
		return 0;
	if (*index + count > mmio_nvram_len)
		count = mmio_nvram_len - *index;

	spin_lock_irqsave(&mmio_nvram_lock, flags);

	memcpy_fromio(buf, mmio_nvram_start + *index, count);

	spin_unlock_irqrestore(&mmio_nvram_lock, flags);
	
	*index += count;
	return count;
}

/*
 * Single-byte read for ppc_md.nvram_read_val; returns 0xff for
 * out-of-range addresses.
 */
static unsigned char mmio_nvram_read_val(int addr)
{
	unsigned long flags;
	unsigned char val;

	if (addr >= mmio_nvram_len)
		return 0xff;

	spin_lock_irqsave(&mmio_nvram_lock, flags);

	val = ioread8(mmio_nvram_start + addr);

	spin_unlock_irqrestore(&mmio_nvram_lock, flags);

	return val;
}

/*
 * Bulk write for ppc_md.nvram_write: mirror of mmio_nvram_read().
 */
static ssize_t mmio_nvram_write(char *buf, size_t count, loff_t *index)
{
	unsigned long flags;

	if (*index >= mmio_nvram_len)
		return 0;
	if (*index + count > mmio_nvram_len)
		count = mmio_nvram_len - *index;

	spin_lock_irqsave(&mmio_nvram_lock, flags);

	memcpy_toio(mmio_nvram_start + *index, buf, count);

	spin_unlock_irqrestore(&mmio_nvram_lock, flags);
	
	*index += count;
	return count;
}

/*
 * Single-byte write for ppc_md.nvram_write_val; silently ignores
 * out-of-range addresses.
 */
void mmio_nvram_write_val(int addr, unsigned char val)
{
	unsigned long flags;

	if (addr < mmio_nvram_len) {
		spin_lock_irqsave(&mmio_nvram_lock, flags);

		iowrite8(val, mmio_nvram_start + addr);

		spin_unlock_irqrestore(&mmio_nvram_lock, flags);
	}
}

/* Report the window size for ppc_md.nvram_size. */
static ssize_t mmio_nvram_get_size(void)
{
	return mmio_nvram_len;
}

/*
 * Locate the "nvram" node in the device tree, map its register window,
 * and install the accessors above into ppc_md.  Returns 0 on success
 * or a negative errno.
 */
int __init mmio_nvram_init(void)
{
	struct device_node *nvram_node;
	unsigned long nvram_addr;
	struct resource r;
	int ret;

	nvram_node = of_find_node_by_type(NULL, "nvram");
	if (!nvram_node)
		nvram_node = of_find_compatible_node(NULL, NULL, "nvram");
	if (!nvram_node) {
		printk(KERN_WARNING "nvram: no node found in device-tree\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(nvram_node, 0, &r);
	if (ret) {
		printk(KERN_WARNING "nvram: failed to get address (err %d)\n",
		       ret);
		goto out;
	}
	nvram_addr = r.start;
	mmio_nvram_len = resource_size(&r);
	if ( (!mmio_nvram_len) || (!nvram_addr) ) {
		printk(KERN_WARNING "nvram: address or length is 0\n");
		ret = -EIO;
		goto out;
	}

	mmio_nvram_start = ioremap(nvram_addr, mmio_nvram_len);
	if (!mmio_nvram_start) {
		printk(KERN_WARNING "nvram: failed to ioremap\n");
		ret = -ENOMEM;
		goto out;
	}

	printk(KERN_INFO "mmio NVRAM, %luk at 0x%lx mapped to %p\n",
	       mmio_nvram_len >> 10, nvram_addr, mmio_nvram_start);

	ppc_md.nvram_read_val	= mmio_nvram_read_val;
	ppc_md.nvram_write_val	= mmio_nvram_write_val;
	ppc_md.nvram_read	= mmio_nvram_read;
	ppc_md.nvram_write	= mmio_nvram_write;
	ppc_md.nvram_size	= mmio_nvram_get_size;

out:
	of_node_put(nvram_node);
	return ret;
}
gpl-2.0
HinTak/linux
drivers/net/ethernet/netronome/nfp/nfp_main.c
61
21324
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_main.c * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> * Alejandro Lucero <alejandro.lucero@netronome.com> * Jason McMullan <jason.mcmullan@netronome.com> * Rolf Neugebauer <rolf.neugebauer@netronome.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/vmalloc.h> #include <net/devlink.h> #include "nfpcore/nfp.h" #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nffw.h" #include "nfpcore/nfp_nsp.h" #include "nfpcore/nfp6000_pcie.h" #include "nfp_abi.h" #include "nfp_app.h" #include "nfp_main.h" #include "nfp_net.h" static const char nfp_driver_name[] = "nfp"; static const struct pci_device_id nfp_pci_device_ids[] = { { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, { 0, } /* Required last entry. 
*/ }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val) { char name[256]; int err = 0; u64 val; snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { if (err == -ENOENT) return default_val; nfp_err(pf->cpp, "Unable to read symbol %s\n", name); return err; } return val; } u8 __iomem * nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area) { char pf_symbol[256]; snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, nfp_cppcore_pcie_unit(pf->cpp)); return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); } /* Callers should hold the devlink instance lock */ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, void *out_data, u64 out_length) { unsigned long err_at; u64 max_data_sz; u32 val = 0; int n, err; if (!pf->mbox) return -EOPNOTSUPP; max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE; /* Check if cmd field is clear */ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err || val) { nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n", cmd, val, err); return err ?: -EBUSY; } in_length = min(in_length, max_data_sz); n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data, in_length); if (n != in_length) return -EIO; /* Write data_len and wipe reserved */ err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, in_length); if (err) return err; /* Read back for ordering */ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; /* Write cmd and wipe return value */ err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_CMD, cmd); if (err) return err; err_at = jiffies + 5 * HZ; while (true) { /* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err) return err; if (!val) 
break; if (time_is_before_eq_jiffies(err_at)) return -ETIMEDOUT; msleep(5); } /* Copy output if any (could be error info, do it before reading ret) */ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; out_length = min_t(u32, val, min(out_length, max_data_sz)); n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA, out_data, out_length); if (n != out_length) return -EIO; /* Check if there is an error */ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_RET, &val); if (err) return err; if (val) return -val; return out_length; } static bool nfp_board_ready(struct nfp_pf *pf) { const char *cp; long state; int err; cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state"); if (!cp) return false; err = kstrtol(cp, 0, &state); if (err < 0) return false; return state == 15; } static int nfp_pf_board_state_wait(struct nfp_pf *pf) { const unsigned long wait_until = jiffies + 10 * HZ; while (!nfp_board_ready(pf)) { if (time_is_before_eq_jiffies(wait_until)) { nfp_err(pf->cpp, "NFP board initialization timeout\n"); return -EINVAL; } nfp_info(pf->cpp, "waiting for board initialization\n"); if (msleep_interruptible(500)) return -ERESTARTSYS; /* Refresh cached information */ kfree(pf->hwinfo); pf->hwinfo = nfp_hwinfo_read(pf->cpp); } return 0; } static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { int err; pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); if (err) { /* For backwards compatibility if symbol not found allow all */ pf->limit_vfs = ~0; if (err == -ENOENT) return 0; nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); return err; } err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); if (err) nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); return 0; } static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) { #ifdef CONFIG_PCI_IOV struct nfp_pf *pf = pci_get_drvdata(pdev); int err; if (num_vfs > pf->limit_vfs) { nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", 
pf->limit_vfs); return -EINVAL; } err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err); return err; } mutex_lock(&pf->lock); err = nfp_app_sriov_enable(pf->app, num_vfs); if (err) { dev_warn(&pdev->dev, "App specific PCI SR-IOV configuration failed: %d\n", err); goto err_sriov_disable; } pf->num_vfs = num_vfs; dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs); mutex_unlock(&pf->lock); return num_vfs; err_sriov_disable: mutex_unlock(&pf->lock); pci_disable_sriov(pdev); return err; #endif return 0; } static int nfp_pcie_sriov_disable(struct pci_dev *pdev) { #ifdef CONFIG_PCI_IOV struct nfp_pf *pf = pci_get_drvdata(pdev); mutex_lock(&pf->lock); /* If the VFs are assigned we cannot shut down SR-IOV without * causing issues, so just leave the hardware available but * disabled */ if (pci_vfs_assigned(pdev)) { dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n"); mutex_unlock(&pf->lock); return -EPERM; } nfp_app_sriov_disable(pf->app); pf->num_vfs = 0; mutex_unlock(&pf->lock); pci_disable_sriov(pdev); dev_dbg(&pdev->dev, "Removed VFs.\n"); #endif return 0; } static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) { if (!pci_get_drvdata(pdev)) return -ENOENT; if (num_vfs == 0) return nfp_pcie_sriov_disable(pdev); else return nfp_pcie_sriov_enable(pdev, num_vfs); } int nfp_flash_update_common(struct nfp_pf *pf, const struct firmware *fw, struct netlink_ext_ack *extack) { struct device *dev = &pf->pdev->dev; struct nfp_nsp *nsp; int err; nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { err = PTR_ERR(nsp); if (extack) NL_SET_ERR_MSG_MOD(extack, "can't access NSP"); else dev_err(dev, "Failed to access the NSP: %d\n", err); return err; } err = nfp_nsp_write_flash(nsp, fw); if (err < 0) goto exit_close_nsp; dev_info(dev, "Finished writing flash image\n"); err = 0; exit_close_nsp: nfp_nsp_close(nsp); return err; } static const struct firmware * nfp_net_fw_request(struct 
pci_dev *pdev, struct nfp_pf *pf, const char *name) { const struct firmware *fw = NULL; int err; err = request_firmware_direct(&fw, name, &pdev->dev); nfp_info(pf->cpp, " %s: %s\n", name, err ? "not found" : "found"); if (err) return NULL; return fw; } /** * nfp_net_fw_find() - Find the correct firmware image for netdev mode * @pdev: PCI Device structure * @pf: NFP PF Device structure * * Return: firmware if found and requested successfully. */ static const struct firmware * nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) { struct nfp_eth_table_port *port; const struct firmware *fw; const char *fw_model; char fw_name[256]; const u8 *serial; u16 interface; int spc, i, j; nfp_info(pf->cpp, "Looking for firmware file in order of priority:\n"); /* First try to find a firmware image specific for this device */ interface = nfp_cpp_interface(pf->cpp); nfp_cpp_serial(pf->cpp, &serial); sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw", serial, interface >> 8, interface & 0xff); fw = nfp_net_fw_request(pdev, pf, fw_name); if (fw) return fw; /* Then try the PCI name */ sprintf(fw_name, "netronome/pci-%s.nffw", pci_name(pdev)); fw = nfp_net_fw_request(pdev, pf, fw_name); if (fw) return fw; /* Finally try the card type and media */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; } fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"); if (!fw_model) { dev_err(&pdev->dev, "Error: can't read part number\n"); return NULL; } spc = ARRAY_SIZE(fw_name); spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model); for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) { port = &pf->eth_tbl->ports[i]; j = 1; while (i + j < pf->eth_tbl->count && port->speed == port[j].speed) j++; spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, "_%dx%d", j, port->speed / 1000); } if (spc <= 0) return NULL; spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw"); if (spc <= 0) return NULL; return 
nfp_net_fw_request(pdev, pf, fw_name); } static int nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, const char *key, const char *default_val, int max_val, int *value) { char hwinfo[64]; long hi_val; int err; snprintf(hwinfo, sizeof(hwinfo), key); err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), default_val); if (err) return err; err = kstrtol(hwinfo, 0, &hi_val); if (err || hi_val < 0 || hi_val > max_val) { dev_warn(&pdev->dev, "Invalid value '%s' from '%s', ignoring\n", hwinfo, key); err = kstrtol(default_val, 0, &hi_val); } *value = hi_val; return err; } /** * nfp_fw_load() - Load the firmware image * @pdev: PCI Device structure * @pf: NFP PF Device structure * @nsp: NFP SP handle * * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded */ static int nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { bool do_reset, fw_loaded = false; const struct firmware *fw = NULL; int err, reset, policy, ifcs = 0; char *token, *ptr; char hwinfo[64]; u16 interface; snprintf(hwinfo, sizeof(hwinfo), "abi_drv_load_ifc"); err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo), NFP_NSP_DRV_LOAD_IFC_DEFAULT); if (err) return err; interface = nfp_cpp_interface(pf->cpp); ptr = hwinfo; while ((token = strsep(&ptr, ","))) { unsigned long interface_hi; err = kstrtoul(token, 0, &interface_hi); if (err) { dev_err(&pdev->dev, "Failed to parse interface '%s': %d\n", token, err); return err; } ifcs++; if (interface == interface_hi) break; } if (!token) { dev_info(&pdev->dev, "Firmware will be loaded by partner\n"); return 0; } err = nfp_get_fw_policy_value(pdev, nsp, "abi_drv_reset", NFP_NSP_DRV_RESET_DEFAULT, NFP_NSP_DRV_RESET_NEVER, &reset); if (err) return err; err = nfp_get_fw_policy_value(pdev, nsp, "app_fw_from_flash", NFP_NSP_APP_FW_LOAD_DEFAULT, NFP_NSP_APP_FW_LOAD_PREF, &policy); if (err) return err; fw = nfp_net_fw_find(pdev, pf); do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || (fw && reset == 
NFP_NSP_DRV_RESET_DISK); if (do_reset) { dev_info(&pdev->dev, "Soft-resetting the NFP\n"); err = nfp_nsp_device_soft_reset(nsp); if (err < 0) { dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n", err); goto exit_release_fw; } } if (fw && policy != NFP_NSP_APP_FW_LOAD_FLASH) { if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp)) goto exit_release_fw; err = nfp_nsp_load_fw(nsp, fw); if (err < 0) { dev_err(&pdev->dev, "FW loading failed: %d\n", err); goto exit_release_fw; } dev_info(&pdev->dev, "Finished loading FW image\n"); fw_loaded = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { /* Don't propagate this error to stick with legacy driver * behavior, failure will be detected later during init. */ if (!nfp_nsp_load_stored_fw(nsp)) dev_info(&pdev->dev, "Finished loading stored FW image\n"); /* Don't flag the fw_loaded in this case since other devices * may reuse the firmware when configured this way */ } else { dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n"); } exit_release_fw: release_firmware(fw); /* We don't want to unload firmware when other devices may still be * dependent on it, which could be the case if there are multiple * devices that could load firmware. */ if (fw_loaded && ifcs == 1) pf->unload_fw_on_remove = true; return err < 0 ? 
err : fw_loaded; } static void nfp_nsp_init_ports(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { bool needs_reinit = false; int i; pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); if (!pf->eth_tbl) return; if (!nfp_nsp_has_mac_reinit(nsp)) return; for (i = 0; i < pf->eth_tbl->count; i++) needs_reinit |= pf->eth_tbl->ports[i].override_changed; if (!needs_reinit) return; kfree(pf->eth_tbl); if (nfp_nsp_mac_reinit(nsp)) dev_warn(&pdev->dev, "MAC reinit failed\n"); pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); } static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) { struct nfp_nsp *nsp; int err; err = nfp_resource_wait(pf->cpp, NFP_RESOURCE_NSP, 30); if (err) return err; nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { err = PTR_ERR(nsp); dev_err(&pdev->dev, "Failed to access the NSP: %d\n", err); return err; } err = nfp_nsp_wait(nsp); if (err < 0) goto exit_close_nsp; nfp_nsp_init_ports(pdev, pf, nsp); pf->nspi = __nfp_nsp_identify(nsp); if (pf->nspi) dev_info(&pdev->dev, "BSP: %s\n", pf->nspi->version); err = nfp_fw_load(pdev, pf, nsp); if (err < 0) { kfree(pf->nspi); kfree(pf->eth_tbl); dev_err(&pdev->dev, "Failed to load FW\n"); goto exit_close_nsp; } pf->fw_loaded = !!err; err = 0; exit_close_nsp: nfp_nsp_close(nsp); return err; } static void nfp_fw_unload(struct nfp_pf *pf) { struct nfp_nsp *nsp; int err; nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { nfp_err(pf->cpp, "Reset failed, can't open NSP\n"); return; } err = nfp_nsp_device_soft_reset(nsp); if (err < 0) dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err); else dev_info(&pf->pdev->dev, "Firmware safely unloaded\n"); nfp_nsp_close(nsp); } static int nfp_pf_find_rtsyms(struct nfp_pf *pf) { char pf_symbol[256]; unsigned int pf_id; pf_id = nfp_cppcore_pcie_unit(pf->cpp); /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol); if (pf->mbox && nfp_rtsym_size(pf->mbox) < 
NFP_MBOX_SYM_MIN_SIZE) { nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n", nfp_rtsym_size(pf->mbox), NFP_MBOX_SYM_MIN_SIZE); return -EINVAL; } return 0; } static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct devlink *devlink; struct nfp_pf *pf; int err; if (pdev->vendor == PCI_VENDOR_ID_NETRONOME && pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF) dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n"); err = pci_enable_device(pdev); if (err < 0) return err; pci_set_master(pdev); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS)); if (err) goto err_pci_disable; err = pci_request_regions(pdev, nfp_driver_name); if (err < 0) { dev_err(&pdev->dev, "Unable to reserve pci resources.\n"); goto err_pci_disable; } devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf)); if (!devlink) { err = -ENOMEM; goto err_rel_regions; } pf = devlink_priv(devlink); INIT_LIST_HEAD(&pf->vnics); INIT_LIST_HEAD(&pf->ports); mutex_init(&pf->lock); pci_set_drvdata(pdev, pf); pf->pdev = pdev; pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev)); if (!pf->wq) { err = -ENOMEM; goto err_pci_priv_unset; } pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev); if (IS_ERR(pf->cpp)) { err = PTR_ERR(pf->cpp); goto err_disable_msix; } err = nfp_resource_table_init(pf->cpp); if (err) goto err_cpp_free; pf->hwinfo = nfp_hwinfo_read(pf->cpp); dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n", nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"), nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"), nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"), nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"), nfp_hwinfo_lookup(pf->hwinfo, "cpld.version")); err = nfp_pf_board_state_wait(pf); if (err) goto err_hwinfo_free; err = nfp_nsp_init(pdev, pf); if (err) goto err_hwinfo_free; pf->mip = nfp_mip_open(pf->cpp); pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip); err = nfp_pf_find_rtsyms(pf); if 
(err) goto err_fw_unload; pf->dump_flag = NFP_DUMP_NSP_DIAG; pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl); err = nfp_pcie_sriov_read_nfd_limit(pf); if (err) goto err_fw_unload; pf->num_vfs = pci_num_vf(pdev); if (pf->num_vfs > pf->limit_vfs) { dev_err(&pdev->dev, "Error: %d VFs already enabled, but loaded FW can only support %d\n", pf->num_vfs, pf->limit_vfs); err = -EINVAL; goto err_fw_unload; } err = nfp_net_pci_probe(pf); if (err) goto err_fw_unload; err = nfp_hwmon_register(pf); if (err) { dev_err(&pdev->dev, "Failed to register hwmon info\n"); goto err_net_remove; } return 0; err_net_remove: nfp_net_pci_remove(pf); err_fw_unload: kfree(pf->rtbl); nfp_mip_close(pf->mip); if (pf->unload_fw_on_remove) nfp_fw_unload(pf); kfree(pf->eth_tbl); kfree(pf->nspi); vfree(pf->dumpspec); err_hwinfo_free: kfree(pf->hwinfo); err_cpp_free: nfp_cpp_free(pf->cpp); err_disable_msix: destroy_workqueue(pf->wq); err_pci_priv_unset: pci_set_drvdata(pdev, NULL); mutex_destroy(&pf->lock); devlink_free(devlink); err_rel_regions: pci_release_regions(pdev); err_pci_disable: pci_disable_device(pdev); return err; } static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) { struct nfp_pf *pf; pf = pci_get_drvdata(pdev); if (!pf) return; nfp_hwmon_unregister(pf); nfp_pcie_sriov_disable(pdev); nfp_net_pci_remove(pf); vfree(pf->dumpspec); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (unload_fw && pf->unload_fw_on_remove) nfp_fw_unload(pf); destroy_workqueue(pf->wq); pci_set_drvdata(pdev, NULL); kfree(pf->hwinfo); nfp_cpp_free(pf->cpp); kfree(pf->eth_tbl); kfree(pf->nspi); mutex_destroy(&pf->lock); devlink_free(priv_to_devlink(pf)); pci_release_regions(pdev); pci_disable_device(pdev); } static void nfp_pci_remove(struct pci_dev *pdev) { __nfp_pci_shutdown(pdev, true); } static void nfp_pci_shutdown(struct pci_dev *pdev) { __nfp_pci_shutdown(pdev, false); } static struct pci_driver nfp_pci_driver = { .name = nfp_driver_name, .id_table = nfp_pci_device_ids, .probe = 
nfp_pci_probe, .remove = nfp_pci_remove, .shutdown = nfp_pci_shutdown, .sriov_configure = nfp_pcie_sriov_configure, }; static int __init nfp_main_init(void) { int err; pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n", nfp_driver_name); nfp_net_debugfs_create(); err = pci_register_driver(&nfp_pci_driver); if (err < 0) goto err_destroy_debugfs; err = pci_register_driver(&nfp_netvf_pci_driver); if (err) goto err_unreg_pf; return err; err_unreg_pf: pci_unregister_driver(&nfp_pci_driver); err_destroy_debugfs: nfp_net_debugfs_destroy(); return err; } static void __exit nfp_main_exit(void) { pci_unregister_driver(&nfp_netvf_pci_driver); pci_unregister_driver(&nfp_pci_driver); nfp_net_debugfs_destroy(); } module_init(nfp_main_init); module_exit(nfp_main_exit); MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw"); MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
gpl-2.0
Naoya-Horiguchi/linux
drivers/cpufreq/ppc_cbe_cpufreq.c
317
3715
// SPDX-License-Identifier: GPL-2.0-or-later /* * cpufreq driver for the cell processor * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 * * Author: Christian Krafft <krafft@de.ibm.com> */ #include <linux/cpufreq.h> #include <linux/module.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/cell-regs.h> #include "ppc_cbe_cpufreq.h" /* the CBE supports an 8 step frequency scaling */ static struct cpufreq_frequency_table cbe_freqs[] = { {0, 1, 0}, {0, 2, 0}, {0, 3, 0}, {0, 4, 0}, {0, 5, 0}, {0, 6, 0}, {0, 8, 0}, {0, 10, 0}, {0, 0, CPUFREQ_TABLE_END}, }; /* * hardware specific functions */ static int set_pmode(unsigned int cpu, unsigned int slow_mode) { int rc; if (cbe_cpufreq_has_pmi) rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); else rc = cbe_cpufreq_set_pmode(cpu, slow_mode); pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); return rc; } /* * cpufreq functions */ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) { struct cpufreq_frequency_table *pos; const u32 *max_freqp; u32 max_freq; int cur_pmode; struct device_node *cpu; cpu = of_get_cpu_node(policy->cpu, NULL); if (!cpu) return -ENODEV; pr_debug("init cpufreq on CPU %d\n", policy->cpu); /* * Let's check we can actually get to the CELL regs */ if (!cbe_get_cpu_pmd_regs(policy->cpu) || !cbe_get_cpu_mic_tm_regs(policy->cpu)) { pr_info("invalid CBE regs pointers for cpufreq\n"); of_node_put(cpu); return -EINVAL; } max_freqp = of_get_property(cpu, "clock-frequency", NULL); of_node_put(cpu); if (!max_freqp) return -EINVAL; /* we need the freq in kHz */ max_freq = *max_freqp / 1000; pr_debug("max clock-frequency is at %u kHz\n", max_freq); pr_debug("initializing frequency table\n"); /* initialize frequency table */ cpufreq_for_each_entry(pos, cbe_freqs) { pos->frequency = max_freq / pos->driver_data; pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency); } /* if DEBUG is enabled set_pmode() measures the latency * of a 
transition */ policy->cpuinfo.transition_latency = 25000; cur_pmode = cbe_cpufreq_get_pmode(policy->cpu); pr_debug("current pmode is at %d\n",cur_pmode); policy->cur = cbe_freqs[cur_pmode].frequency; #ifdef CONFIG_SMP cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); #endif policy->freq_table = cbe_freqs; cbe_cpufreq_pmi_policy_init(policy); return 0; } static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) { cbe_cpufreq_pmi_policy_exit(policy); return 0; } static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int cbe_pmode_new) { pr_debug("setting frequency for cpu %d to %d kHz, " \ "1/%d of max frequency\n", policy->cpu, cbe_freqs[cbe_pmode_new].frequency, cbe_freqs[cbe_pmode_new].driver_data); return set_pmode(policy->cpu, cbe_pmode_new); } static struct cpufreq_driver cbe_cpufreq_driver = { .verify = cpufreq_generic_frequency_table_verify, .target_index = cbe_cpufreq_target, .init = cbe_cpufreq_cpu_init, .exit = cbe_cpufreq_cpu_exit, .name = "cbe-cpufreq", .flags = CPUFREQ_CONST_LOOPS, }; /* * module init and destoy */ static int __init cbe_cpufreq_init(void) { int ret; if (!machine_is(cell)) return -ENODEV; cbe_cpufreq_pmi_init(); ret = cpufreq_register_driver(&cbe_cpufreq_driver); if (ret) cbe_cpufreq_pmi_exit(); return ret; } static void __exit cbe_cpufreq_exit(void) { cpufreq_unregister_driver(&cbe_cpufreq_driver); cbe_cpufreq_pmi_exit(); } module_init(cbe_cpufreq_init); module_exit(cbe_cpufreq_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
gpl-2.0
emuikernel/WNR2000v4
git_home/linux-2.6.git/fs/ocfs2/export.c
573
6701
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * export.c * * Functions to facilitate NFS exporting * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #define MLOG_MASK_PREFIX ML_EXPORT #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "dir.h" #include "dlmglue.h" #include "dcache.h" #include "export.h" #include "inode.h" #include "buffer_head_io.h" #include "suballoc.h" struct ocfs2_inode_handle { u64 ih_blkno; u32 ih_generation; }; static struct dentry *ocfs2_get_dentry(struct super_block *sb, struct ocfs2_inode_handle *handle) { struct inode *inode; struct ocfs2_super *osb = OCFS2_SB(sb); u64 blkno = handle->ih_blkno; int status, set; struct dentry *result; mlog_entry("(0x%p, 0x%p)\n", sb, handle); if (blkno == 0) { mlog(0, "nfs wants inode with blkno: 0\n"); result = ERR_PTR(-ESTALE); goto bail; } inode = ocfs2_ilookup(sb, blkno); /* * If the inode exists in memory, we only need to check it's * generation number */ if (inode) goto check_gen; /* * This will synchronize us against ocfs2_delete_inode() on * all nodes */ status = ocfs2_nfs_sync_lock(osb, 1); if (status < 0) { mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status); goto check_err; } 
status = ocfs2_test_inode_bit(osb, blkno, &set); if (status < 0) { if (status == -EINVAL) { /* * The blkno NFS gave us doesn't even show up * as an inode, we return -ESTALE to be * nice */ mlog(0, "test inode bit failed %d\n", status); status = -ESTALE; } else { mlog(ML_ERROR, "test inode bit failed %d\n", status); } goto unlock_nfs_sync; } /* If the inode allocator bit is clear, this inode must be stale */ if (!set) { mlog(0, "inode %llu suballoc bit is clear\n", (unsigned long long)blkno); status = -ESTALE; goto unlock_nfs_sync; } inode = ocfs2_iget(osb, blkno, 0, 0); unlock_nfs_sync: ocfs2_nfs_sync_unlock(osb, 1); check_err: if (status < 0) { if (status == -ESTALE) { mlog(0, "stale inode ino: %llu generation: %u\n", (unsigned long long)blkno, handle->ih_generation); } result = ERR_PTR(status); goto bail; } if (IS_ERR(inode)) { mlog_errno(PTR_ERR(inode)); result = (void *)inode; goto bail; } check_gen: if (handle->ih_generation != inode->i_generation) { iput(inode); mlog(0, "stale inode ino: %llu generation: %u\n", (unsigned long long)blkno, handle->ih_generation); result = ERR_PTR(-ESTALE); goto bail; } result = d_obtain_alias(inode); if (!IS_ERR(result)) result->d_op = &ocfs2_dentry_ops; else mlog_errno(PTR_ERR(result)); bail: mlog_exit_ptr(result); return result; } static struct dentry *ocfs2_get_parent(struct dentry *child) { int status; u64 blkno; struct dentry *parent; struct inode *dir = child->d_inode; mlog_entry("(0x%p, '%.*s')\n", child, child->d_name.len, child->d_name.name); mlog(0, "find parent of directory %llu\n", (unsigned long long)OCFS2_I(dir)->ip_blkno); status = ocfs2_inode_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); parent = ERR_PTR(status); goto bail; } status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno); if (status < 0) { parent = ERR_PTR(-ENOENT); goto bail_unlock; } parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); if (!IS_ERR(parent)) parent->d_op = &ocfs2_dentry_ops; 
bail_unlock: ocfs2_inode_unlock(dir, 0); bail: mlog_exit_ptr(parent); return parent; } static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, int connectable) { struct inode *inode = dentry->d_inode; int len = *max_len; int type = 1; u64 blkno; u32 generation; __le32 *fh = (__force __le32 *) fh_in; mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry, dentry->d_name.len, dentry->d_name.name, fh, len, connectable); if (len < 3 || (connectable && len < 6)) { mlog(ML_ERROR, "fh buffer is too small for encoding\n"); type = 255; goto bail; } blkno = OCFS2_I(inode)->ip_blkno; generation = inode->i_generation; mlog(0, "Encoding fh: blkno: %llu, generation: %u\n", (unsigned long long)blkno, generation); len = 3; fh[0] = cpu_to_le32((u32)(blkno >> 32)); fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[2] = cpu_to_le32(generation); if (connectable && !S_ISDIR(inode->i_mode)) { struct inode *parent; spin_lock(&dentry->d_lock); parent = dentry->d_parent->d_inode; blkno = OCFS2_I(parent)->ip_blkno; generation = parent->i_generation; fh[3] = cpu_to_le32((u32)(blkno >> 32)); fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[5] = cpu_to_le32(generation); spin_unlock(&dentry->d_lock); len = 6; type = 2; mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", (unsigned long long)blkno, generation); } *max_len = len; bail: mlog_exit(type); return type; } static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct ocfs2_inode_handle handle; if (fh_len < 3 || fh_type > 2) return NULL; handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32; handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]); handle.ih_generation = le32_to_cpu(fid->raw[2]); return ocfs2_get_dentry(sb, &handle); } static struct dentry *ocfs2_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct ocfs2_inode_handle parent; if (fh_type != 2 || fh_len < 6) return NULL; parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) 
<< 32; parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]); parent.ih_generation = le32_to_cpu(fid->raw[5]); return ocfs2_get_dentry(sb, &parent); } const struct export_operations ocfs2_export_ops = { .encode_fh = ocfs2_encode_fh, .fh_to_dentry = ocfs2_fh_to_dentry, .fh_to_parent = ocfs2_fh_to_parent, .get_parent = ocfs2_get_parent, };
gpl-2.0
blackb1rd/android_kernel_samsung_d2
drivers/hwmon/vt8231.c
829
31981
/* * vt8231.c - Part of lm_sensors, Linux kernel modules * for hardware monitoring * * Copyright (c) 2005 Roger Lucas <vt8231@hiddenengine.co.uk> * Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com> * Aaron M. Marsh <amarsh@sdf.lonestar.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Supports VIA VT8231 South Bridge embedded sensors */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/acpi.h> #include <linux/io.h> static int force_addr; module_param(force_addr, int, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors"); static struct platform_device *pdev; #define VT8231_EXTENT 0x80 #define VT8231_BASE_REG 0x70 #define VT8231_ENABLE_REG 0x74 /* * The VT8231 registers * * The reset value for the input channel configuration is used (Reg 0x4A=0x07) * which sets the selected inputs marked with '*' below if multiple options are * possible: * * Voltage Mode Temperature Mode * Sensor Linux Id Linux Id VIA Id * -------- -------- -------- ------ * CPU Diode N/A temp1 0 * UIC1 in0 temp2 * 1 * UIC2 in1 * 
temp3 2 * UIC3 in2 * temp4 3 * UIC4 in3 * temp5 4 * UIC5 in4 * temp6 5 * 3.3V in5 N/A * * Note that the BIOS may set the configuration register to a different value * to match the motherboard configuration. */ /* fans numbered 0-1 */ #define VT8231_REG_FAN_MIN(nr) (0x3b + (nr)) #define VT8231_REG_FAN(nr) (0x29 + (nr)) /* Voltage inputs numbered 0-5 */ static const u8 regvolt[] = { 0x21, 0x22, 0x23, 0x24, 0x25, 0x26 }; static const u8 regvoltmax[] = { 0x3d, 0x2b, 0x2d, 0x2f, 0x31, 0x33 }; static const u8 regvoltmin[] = { 0x3e, 0x2c, 0x2e, 0x30, 0x32, 0x34 }; /* * Temperatures are numbered 1-6 according to the Linux kernel specification. * * In the VIA datasheet, however, the temperatures are numbered from zero. * Since it is important that this driver can easily be compared to the VIA * datasheet, we will use the VIA numbering within this driver and map the * kernel sysfs device name to the VIA number in the sysfs callback. */ #define VT8231_REG_TEMP_LOW01 0x49 #define VT8231_REG_TEMP_LOW25 0x4d static const u8 regtemp[] = { 0x1f, 0x21, 0x22, 0x23, 0x24, 0x25 }; static const u8 regtempmax[] = { 0x39, 0x3d, 0x2b, 0x2d, 0x2f, 0x31 }; static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 }; #define TEMP_FROM_REG(reg) (((253 * 4 - (reg)) * 550 + 105) / 210) #define TEMP_MAXMIN_FROM_REG(reg) (((253 - (reg)) * 2200 + 105) / 210) #define TEMP_MAXMIN_TO_REG(val) (253 - ((val) * 210 + 1100) / 2200) #define VT8231_REG_CONFIG 0x40 #define VT8231_REG_ALARM1 0x41 #define VT8231_REG_ALARM2 0x42 #define VT8231_REG_FANDIV 0x47 #define VT8231_REG_UCH_CONFIG 0x4a #define VT8231_REG_TEMP1_CONFIG 0x4b #define VT8231_REG_TEMP2_CONFIG 0x4c /* * temps 0-5 as numbered in VIA datasheet - see later for mapping to Linux * numbering */ #define ISTEMP(i, ch_config) ((i) == 0 ? 1 : \ ((ch_config) >> ((i)+1)) & 0x01) /* voltages 0-5 */ #define ISVOLT(i, ch_config) ((i) == 5 ? 
1 : \ !(((ch_config) >> ((i)+2)) & 0x01)) #define DIV_FROM_REG(val) (1 << (val)) /* * NB The values returned here are NOT temperatures. The calibration curves * for the thermistor curves are board-specific and must go in the * sensors.conf file. Temperature sensors are actually ten bits, but the * VIA datasheet only considers the 8 MSBs obtained from the regtemp[] * register. The temperature value returned should have a magnitude of 3, * so we use the VIA scaling as the "true" scaling and use the remaining 2 * LSBs as fractional precision. * * All the on-chip hardware temperature comparisons for the alarms are only * 8-bits wide, and compare against the 8 MSBs of the temperature. The bits * in the registers VT8231_REG_TEMP_LOW01 and VT8231_REG_TEMP_LOW25 are * ignored. */ /* ****** FAN RPM CONVERSIONS ******** * This chip saturates back at 0, not at 255 like many the other chips. * So, 0 means 0 RPM */ static inline u8 FAN_TO_REG(long rpm, int div) { if (rpm <= 0 || rpm > 1310720) return 0; return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255); } #define FAN_FROM_REG(val, div) ((val) == 0 ? 
0 : 1310720 / ((val) * (div))) struct vt8231_data { unsigned short addr; const char *name; struct mutex update_lock; struct device *hwmon_dev; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[6]; /* Register value */ u8 in_max[6]; /* Register value */ u8 in_min[6]; /* Register value */ u16 temp[6]; /* Register value 10 bit, right aligned */ u8 temp_max[6]; /* Register value */ u8 temp_min[6]; /* Register value */ u8 fan[2]; /* Register value */ u8 fan_min[2]; /* Register value */ u8 fan_div[2]; /* Register encoding, shifted right */ u16 alarms; /* Register encoding */ u8 uch_config; }; static struct pci_dev *s_bridge; static int vt8231_probe(struct platform_device *pdev); static int __devexit vt8231_remove(struct platform_device *pdev); static struct vt8231_data *vt8231_update_device(struct device *dev); static void vt8231_init_device(struct vt8231_data *data); static inline int vt8231_read_value(struct vt8231_data *data, u8 reg) { return inb_p(data->addr + reg); } static inline void vt8231_write_value(struct vt8231_data *data, u8 reg, u8 value) { outb_p(value, data->addr + reg); } /* following are the sysfs callback functions */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", ((data->in[nr] - 3) * 10000) / 958); } static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", ((data->in_min[nr] - 3) * 10000) / 958); } static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = 
sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_max[nr] - 3) * 10000) / 958)); } static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]); mutex_unlock(&data->update_lock); return count; } /* Special case for input 5 as this has 3.3V scaling built into the chip */ static ssize_t show_in5(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in[5] - 3) * 10000 * 54) / (958 * 34))); } static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_min[5] - 3) * 10000 * 54) / (958 * 34))); } static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", (((data->in_max[5] - 3) * 10000 * 54) / (958 * 
34))); } static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(data, regvoltmin[5], data->in_min[5]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 0, 255); vt8231_write_value(data, regvoltmax[5], data->in_max[5]); mutex_unlock(&data->update_lock); return count; } #define define_voltage_sysfs(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ show_in_min, set_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ show_in_max, set_in_max, offset) define_voltage_sysfs(0); define_voltage_sysfs(1); define_voltage_sysfs(2); define_voltage_sysfs(3); define_voltage_sysfs(4); static DEVICE_ATTR(in5_input, S_IRUGO, show_in5, NULL); static DEVICE_ATTR(in5_min, S_IRUGO | S_IWUSR, show_in5_min, set_in5_min); static DEVICE_ATTR(in5_max, S_IRUGO | S_IWUSR, show_in5_max, set_in5_max); /* Temperatures */ static ssize_t show_temp0(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp[0] * 250); } static ssize_t show_temp0_max(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp_max[0] * 1000); 
} static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->temp_min[0] * 1000); } static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(data, regtempmax[0], data->temp_max[0]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255); vt8231_write_value(data, regtempmin[0], data->temp_min[0]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_max[nr])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", 
TEMP_MAXMIN_FROM_REG(data->temp_min[nr])); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct vt8231_data *data = dev_get_drvdata(dev); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255); vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]); mutex_unlock(&data->update_lock); return count; } /* * Note that these map the Linux temperature sensor numbering (1-6) to the VIA * temperature sensor numbering (0-5) */ #define define_temperature_sysfs(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \ show_temp, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, offset - 1) static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp0, NULL); static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp0_max, set_temp0_max); static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp0_min, set_temp0_min); define_temperature_sysfs(2); define_temperature_sysfs(3); define_temperature_sysfs(4); define_temperature_sysfs(5); define_temperature_sysfs(6); /* Fans */ 
/* Fan speed in RPM, derived from the raw counter and the current divisor */
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct vt8231_data *data = vt8231_update_device(dev);
	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
				DIV_FROM_REG(data->fan_div[nr])));
}

/* Minimum fan speed alarm threshold, reported in RPM */
static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct vt8231_data *data = vt8231_update_device(dev);

	return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
			DIV_FROM_REG(data->fan_div[nr])));
}

/* Current clock divisor for this fan input (decoded by DIV_FROM_REG) */
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct vt8231_data *data = vt8231_update_device(dev);

	return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}

/*
 * Store a new fan minimum (RPM). The value is converted to a register
 * count using the fan's current divisor before being written back.
 */
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct vt8231_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
	vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/*
 * Store a new fan clock divisor. The minimum speed (in RPM) is captured
 * before the divisor changes so the register threshold can be rescaled
 * afterwards, keeping the user-visible limit constant.
 */
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct vt8231_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	unsigned long val;
	int nr = sensor_attr->index;
	int old = vt8231_read_value(data, VT8231_REG_FANDIV);
	long min = FAN_FROM_REG(data->fan_min[nr],
				 DIV_FROM_REG(data->fan_div[nr]));
	int err;

	err = kstrtoul(buf, 10,
&val); if (err) return err; mutex_lock(&data->update_lock); switch (val) { case 1: data->fan_div[nr] = 0; break; case 2: data->fan_div[nr] = 1; break; case 4: data->fan_div[nr] = 2; break; case 8: data->fan_div[nr] = 3; break; default: dev_err(dev, "fan_div value %ld not supported. " "Choose one of 1, 2, 4 or 8!\n", val); mutex_unlock(&data->update_lock); return -EINVAL; } /* Correct the fan minimum speed */ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr])); vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]); old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4); vt8231_write_value(data, VT8231_REG_FANDIV, old); mutex_unlock(&data->update_lock); return count; } #define define_fan_sysfs(offset) \ static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \ show_fan, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ show_fan_div, set_fan_div, offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ show_fan_min, set_fan_min, offset - 1) define_fan_sysfs(1); define_fan_sysfs(2); /* Alarms */ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) { struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%d\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct vt8231_data *data = vt8231_update_device(dev); return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp6_alarm, 
S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7); static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct vt8231_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct attribute *vt8231_attributes_temps[6][5] = { { &dev_attr_temp1_input.attr, &dev_attr_temp1_max_hyst.attr, &dev_attr_temp1_max.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp5_input.dev_attr.attr, &sensor_dev_attr_temp5_max_hyst.dev_attr.attr, &sensor_dev_attr_temp5_max.dev_attr.attr, &sensor_dev_attr_temp5_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_temp6_input.dev_attr.attr, &sensor_dev_attr_temp6_max_hyst.dev_attr.attr, &sensor_dev_attr_temp6_max.dev_attr.attr, &sensor_dev_attr_temp6_alarm.dev_attr.attr, NULL } }; static const struct attribute_group 
vt8231_group_temps[6] = { { .attrs = vt8231_attributes_temps[0] }, { .attrs = vt8231_attributes_temps[1] }, { .attrs = vt8231_attributes_temps[2] }, { .attrs = vt8231_attributes_temps[3] }, { .attrs = vt8231_attributes_temps[4] }, { .attrs = vt8231_attributes_temps[5] }, }; static struct attribute *vt8231_attributes_volts[6][5] = { { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, NULL }, { &dev_attr_in5_input.attr, &dev_attr_in5_min.attr, &dev_attr_in5_max.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, NULL } }; static const struct attribute_group vt8231_group_volts[6] = { { .attrs = vt8231_attributes_volts[0] }, { .attrs = vt8231_attributes_volts[1] }, { .attrs = vt8231_attributes_volts[2] }, { .attrs = vt8231_attributes_volts[3] }, { .attrs = vt8231_attributes_volts[4] }, { .attrs = vt8231_attributes_volts[5] }, }; static struct attribute *vt8231_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, 
&sensor_dev_attr_fan2_alarm.dev_attr.attr, &dev_attr_alarms.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group vt8231_group = { .attrs = vt8231_attributes, }; static struct platform_driver vt8231_driver = { .driver = { .owner = THIS_MODULE, .name = "vt8231", }, .probe = vt8231_probe, .remove = __devexit_p(vt8231_remove), }; static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, { 0, } }; MODULE_DEVICE_TABLE(pci, vt8231_pci_ids); static int __devinit vt8231_pci_probe(struct pci_dev *dev, const struct pci_device_id *id); static struct pci_driver vt8231_pci_driver = { .name = "vt8231", .id_table = vt8231_pci_ids, .probe = vt8231_pci_probe, }; static int vt8231_probe(struct platform_device *pdev) { struct resource *res; struct vt8231_data *data; int err = 0, i; /* Reserve the ISA region */ res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, VT8231_EXTENT, vt8231_driver.driver.name)) { dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n", (unsigned long)res->start, (unsigned long)res->end); return -ENODEV; } data = kzalloc(sizeof(struct vt8231_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit_release; } platform_set_drvdata(pdev, data); data->addr = res->start; data->name = "vt8231"; mutex_init(&data->update_lock); vt8231_init_device(data); /* Register sysfs hooks */ err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group); if (err) goto exit_free; /* Must update device information to find out the config field */ data->uch_config = vt8231_read_value(data, VT8231_REG_UCH_CONFIG); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) { if (ISTEMP(i, data->uch_config)) { err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group_temps[i]); if (err) goto exit_remove_files; } } for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) { if (ISVOLT(i, data->uch_config)) { err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group_volts[i]); if (err) goto 
exit_remove_files; } } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]); sysfs_remove_group(&pdev->dev.kobj, &vt8231_group); exit_free: platform_set_drvdata(pdev, NULL); kfree(data); exit_release: release_region(res->start, VT8231_EXTENT); return err; } static int __devexit vt8231_remove(struct platform_device *pdev) { struct vt8231_data *data = platform_get_drvdata(pdev); int i; hwmon_device_unregister(data->hwmon_dev); for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]); for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]); sysfs_remove_group(&pdev->dev.kobj, &vt8231_group); release_region(data->addr, VT8231_EXTENT); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static void vt8231_init_device(struct vt8231_data *data) { vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0); vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0); } static struct vt8231_data *vt8231_update_device(struct device *dev) { struct vt8231_data *data = dev_get_drvdata(dev); int i; u16 low; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { for (i = 0; i < 6; i++) { if (ISVOLT(i, data->uch_config)) { data->in[i] = vt8231_read_value(data, regvolt[i]); data->in_min[i] = vt8231_read_value(data, regvoltmin[i]); data->in_max[i] = vt8231_read_value(data, regvoltmax[i]); } } for (i = 0; i < 2; i++) { data->fan[i] = vt8231_read_value(data, VT8231_REG_FAN(i)); data->fan_min[i] = vt8231_read_value(data, VT8231_REG_FAN_MIN(i)); } low = vt8231_read_value(data, 
VT8231_REG_TEMP_LOW01); low = (low >> 6) | ((low & 0x30) >> 2) | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4); for (i = 0; i < 6; i++) { if (ISTEMP(i, data->uch_config)) { data->temp[i] = (vt8231_read_value(data, regtemp[i]) << 2) | ((low >> (2 * i)) & 0x03); data->temp_max[i] = vt8231_read_value(data, regtempmax[i]); data->temp_min[i] = vt8231_read_value(data, regtempmin[i]); } } i = vt8231_read_value(data, VT8231_REG_FANDIV); data->fan_div[0] = (i >> 4) & 0x03; data->fan_div[1] = i >> 6; data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) | (vt8231_read_value(data, VT8231_REG_ALARM2) << 8); /* Set alarm flags correctly */ if (!data->fan[0] && data->fan_min[0]) data->alarms |= 0x40; else if (data->fan[0] && !data->fan_min[0]) data->alarms &= ~0x40; if (!data->fan[1] && data->fan_min[1]) data->alarms |= 0x80; else if (data->fan[1] && !data->fan_min[1]) data->alarms &= ~0x80; data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } static int __devinit vt8231_device_add(unsigned short address) { struct resource res = { .start = address, .end = address + VT8231_EXTENT - 1, .name = "vt8231", .flags = IORESOURCE_IO, }; int err; err = acpi_check_resource_conflict(&res); if (err) goto exit; pdev = platform_device_alloc("vt8231", address); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } err = platform_device_add_resources(pdev, &res, 1); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __devinit vt8231_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { u16 address, val; if (force_addr) { address = force_addr & 0xff00; dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", address); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, 
VT8231_BASE_REG, address | 1)) return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG, &val)) return -ENODEV; address = val & ~(VT8231_EXTENT - 1); if (address == 0) { dev_err(&dev->dev, "base address not set - upgrade BIOS or use force_addr=0xaddr\n"); return -ENODEV; } if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_ENABLE_REG, &val)) return -ENODEV; if (!(val & 0x0001)) { dev_warn(&dev->dev, "enabling sensors\n"); if (PCIBIOS_SUCCESSFUL != pci_write_config_word(dev, VT8231_ENABLE_REG, val | 0x0001)) return -ENODEV; } if (platform_driver_register(&vt8231_driver)) goto exit; /* Sets global pdev as a side effect */ if (vt8231_device_add(address)) goto exit_unregister; /* * Always return failure here. This is to allow other drivers to bind * to this pci device. We don't really want to have control over the * pci device, we only wanted to read as few register values from it. */ /* * We do, however, mark ourselves as using the PCI device to stop it * getting unloaded. */ s_bridge = pci_dev_get(dev); return -ENODEV; exit_unregister: platform_driver_unregister(&vt8231_driver); exit: return -ENODEV; } static int __init sm_vt8231_init(void) { return pci_register_driver(&vt8231_pci_driver); } static void __exit sm_vt8231_exit(void) { pci_unregister_driver(&vt8231_pci_driver); if (s_bridge != NULL) { platform_device_unregister(pdev); platform_driver_unregister(&vt8231_driver); pci_dev_put(s_bridge); s_bridge = NULL; } } MODULE_AUTHOR("Roger Lucas <vt8231@hiddenengine.co.uk>"); MODULE_DESCRIPTION("VT8231 sensors"); MODULE_LICENSE("GPL"); module_init(sm_vt8231_init); module_exit(sm_vt8231_exit);
gpl-2.0
DutchDanny/SensationXL-ICS
fs/gfs2/bmap.c
2365
32538
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/gfs2_ondisk.h> #include <linux/crc32.h> #include "gfs2.h" #include "incore.h" #include "bmap.h" #include "glock.h" #include "inode.h" #include "meta_io.h" #include "quota.h" #include "rgrp.h" #include "super.h" #include "trans.h" #include "dir.h" #include "util.h" #include "trace_gfs2.h" /* This doesn't need to be that large as max 64 bit pointers in a 4k * block is 512, so __u16 is fine for that. It saves stack space to * keep it small. */ struct metapath { struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT]; __u16 mp_list[GFS2_MAX_META_HEIGHT]; }; typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh, struct buffer_head *bh, __be64 *top, __be64 *bottom, unsigned int height, void *data); struct strip_mine { int sm_first; unsigned int sm_height; }; /** * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page * @ip: the inode * @dibh: the dinode buffer * @block: the block number that was allocated * @page: The (optional) page. 
This is looked up if @page is NULL
 *
 * Returns: errno
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	/* No usable page supplied: look up page 0 of the mapping ourselves */
	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		/* Stuffed data cannot exceed the dinode block minus its header */
		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
			dsize = dibh->b_size - sizeof(struct gfs2_dinode);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}

/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if the @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_block(ip, &block, &n);
		if (error)
			goto out_brelse;
		if (isdir) {
			/* Directories get a fresh leaf buffer with the meta
			   header stripped during the copy */
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/* Set up the pointer to the new block */

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	/* The tree now has exactly one level of indirection */
	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}

/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @mp: The metapath to return the result in
 * @block: The disk block to look up
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine returns a struct metapath structure that defines a path
 * through the metadata of inode "ip" to get to block "block".
 *
 * Example:
 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
 * filesystem with a blocksize of 4096.
 *
 * find_metapath() would return a struct metapath structure set to:
 * mp_offset = 101342453, mp_height = 3, mp_list[0] = 0, mp_list[1] = 48,
 * and mp_list[2] = 165.
 *
 * That means that in order to get to the block containing the byte at
 * offset 101342453, we would load the indirect block pointed to by pointer
 * 0 in the dinode. We would then load the indirect block pointed to by
 * pointer 48 in that indirect block. We would then load the data block
 * pointed to by pointer 165 in that indirect block.
 *
 *   Dinode --(ptr 0)--> Indirect block --(ptr 48)--> Indirect block
 *          --(ptr 165)--> Data block containing offset 101342453
 *
 */

static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	/* Walk from the leaf level back up to the dinode; each do_div()
	   peels off the pointer index within one level of indirection */
	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);

}

/*
 * Height at which a new branch diverges from the existing tree. If the
 * path goes through pointer 0 of the dinode, the branch shares the first
 * indirect block, so it starts one level lower (at height 2).
 */
static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)
		return 2;
	return 1;
}

/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */

static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	/* Height 0 is the dinode itself; deeper levels carry only a
	   meta header before the pointer array */
	unsigned int head_size = (height > 0) ?
		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
}

/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error or height of metadata tree
 */

static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	unsigned int end_of_metadata = ip->i_height - 1;
	unsigned int x;
	__be64 *ptr;
	u64 dblock;
	int ret;

	for (x = 0; x < end_of_metadata; x++) {
		ptr = metapointer(x, mp);
		dblock = be64_to_cpu(*ptr);
		if (!dblock)
			return x + 1;	/* hole: report the partial height */

		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, 0, &mp->mp_bh[x+1]);
		if (ret)
			return ret;
	}

	return ip->i_height;
}

/* Release every buffer collected by lookup_metapath(); the array is
   NULL-terminated, so stop at the first empty slot */
static inline void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
	}
}

/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @start: Start of the buffer
 * @len: Length of the buffer in bytes
 * @ptr: Current position in the buffer
 * @limit: Max extent length to return (0 = unlimited)
 * @eob: Set to 1 if we hit "end of block"
 *
 * If the first block is zero (unallocated) it will return the number of
 * unallocated blocks in the extent, otherwise it will return the number
 * of contiguous blocks in the extent.
 *
 * Returns: The length of the extent (minimum of one block)
 */

static inline unsigned int gfs2_extent_length(void *start, unsigned int len,
					      __be64 *ptr, unsigned limit,
					      int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;	/* expect the next physically contiguous block
				   (d stays 0 while scanning a hole) */
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return (ptr - first);
}

/* Take i_rw_mutex for write if the mapping may allocate, read otherwise */
static inline void bmap_lock(struct gfs2_inode *ip, int create)
{
	if (create)
		down_write(&ip->i_rw_mutex);
	else
		down_read(&ip->i_rw_mutex);
}

/* Drop the lock taken by bmap_lock() with the same @create flag */
static inline void bmap_unlock(struct gfs2_inode *ip, int create)
{
	if (create)
		up_write(&ip->i_rw_mutex);
	else
		up_read(&ip->i_rw_mutex);
}

/*
 * Initialise a freshly allocated indirect block at metapath level @i
 * (block number @bn) and link it into its parent at pointer @offset.
 * The new buffer is added to the transaction, stamped with the indirect
 * metatype and cleared past the header. Returns the parent slot that
 * was written.
 */
static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
				 sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}

/* State machine used by gfs2_bmap_alloc(); counts down from growing the
   tree height, through growing depth, to filling in data blocks */
enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};

/**
 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @lblock: The logical starting block of the extent
 * @bh_map: This is used to return the mapping details
 * @mp: The metapath
 * @sheight: The starting height (i.e.
whats already mapped) * @height: The height to build to * @maxlen: The max number of data blocks to alloc * * In this routine we may have to alloc: * i) Indirect blocks to grow the metadata tree height * ii) Indirect blocks to fill in lower part of the metadata tree * iii) Data blocks * * The function is in two parts. The first part works out the total * number of blocks which we need. The second part does the actual * allocation asking for an extent at a time (if enough contiguous free * blocks are available, there will only be one request per bmap call) * and uses the state machine to initialise the blocks in order. * * Returns: errno on error */ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock, struct buffer_head *bh_map, struct metapath *mp, const unsigned int sheight, const unsigned int height, const unsigned int maxlen) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct buffer_head *dibh = mp->mp_bh[0]; u64 bn, dblock = 0; unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0; unsigned dblks = 0; unsigned ptrs_per_blk; const unsigned end_of_metadata = height - 1; int eob = 0; enum alloc_state state; __be64 *ptr; __be64 zero_bn = 0; BUG_ON(sheight < 1); BUG_ON(dibh == NULL); gfs2_trans_add_bh(ip->i_gl, dibh, 1); if (height == sheight) { struct buffer_head *bh; /* Bottom indirect block exists, find unalloced extent size */ ptr = metapointer(end_of_metadata, mp); bh = mp->mp_bh[end_of_metadata]; dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob); BUG_ON(dblks < 1); state = ALLOC_DATA; } else { /* Need to allocate indirect blocks */ ptrs_per_blk = height > 1 ? 
sdp->sd_inptrs : sdp->sd_diptrs; dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]); if (height == ip->i_height) { /* Writing into existing tree, extend tree down */ iblks = height - sheight; state = ALLOC_GROW_DEPTH; } else { /* Building up tree height */ state = ALLOC_GROW_HEIGHT; iblks = height - ip->i_height; branch_start = metapath_branch_start(mp); iblks += (height - branch_start); } } /* start of the second part of the function (state machine) */ blks = dblks + iblks; i = sheight; do { int error; n = blks - alloced; error = gfs2_alloc_block(ip, &bn, &n); if (error) return error; alloced += n; if (state != ALLOC_DATA || gfs2_is_jdata(ip)) gfs2_trans_add_unrevoke(sdp, bn, n); switch (state) { /* Growing height of tree */ case ALLOC_GROW_HEIGHT: if (i == 1) { ptr = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode)); zero_bn = *ptr; } for (; i - 1 < height - ip->i_height && n > 0; i++, n--) gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++); if (i - 1 == height - ip->i_height) { i--; gfs2_buffer_copy_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header), dibh, sizeof(struct gfs2_dinode)); gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + sizeof(__be64)); ptr = (__be64 *)(mp->mp_bh[i]->b_data + sizeof(struct gfs2_meta_header)); *ptr = zero_bn; state = ALLOC_GROW_DEPTH; for(i = branch_start; i < height; i++) { if (mp->mp_bh[i] == NULL) break; brelse(mp->mp_bh[i]); mp->mp_bh[i] = NULL; } i = branch_start; } if (n == 0) break; /* Branching from existing tree */ case ALLOC_GROW_DEPTH: if (i > 1 && i < height) gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1); for (; i < height && n > 0; i++, n--) gfs2_indirect_init(mp, ip->i_gl, i, mp->mp_list[i-1], bn++); if (i == height) state = ALLOC_DATA; if (n == 0) break; /* Tree complete, adding data blocks */ case ALLOC_DATA: BUG_ON(n > dblks); BUG_ON(mp->mp_bh[end_of_metadata] == NULL); gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1); dblks = n; ptr = metapointer(end_of_metadata, mp); dblock = bn; 
while (n-- > 0) *ptr++ = cpu_to_be64(bn++); break; } } while ((state != ALLOC_DATA) || !dblock); ip->i_height = height; gfs2_add_inode_blocks(&ip->i_inode, alloced); gfs2_dinode_out(ip, mp->mp_bh[0]->b_data); map_bh(bh_map, inode->i_sb, dblock); bh_map->b_size = dblks << inode->i_blkbits; set_buffer_new(bh_map); return 0; } /** * gfs2_block_map - Map a block from an inode to a disk block * @inode: The inode * @lblock: The logical block number * @bh_map: The bh to be mapped * @create: True if its ok to alloc blocks to satify the request * * Sets buffer_mapped() if successful, sets buffer_boundary() if a * read of metadata will be required before the next block can be * mapped. Sets buffer_new() if new blocks were allocated. * * Returns: errno */ int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh_map, int create) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned int bsize = sdp->sd_sb.sb_bsize; const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits; const u64 *arr = sdp->sd_heightsize; __be64 *ptr; u64 size; struct metapath mp; int ret; int eob; unsigned int len; struct buffer_head *bh; u8 height; BUG_ON(maxlen == 0); memset(mp.mp_bh, 0, sizeof(mp.mp_bh)); bmap_lock(ip, create); clear_buffer_mapped(bh_map); clear_buffer_new(bh_map); clear_buffer_boundary(bh_map); trace_gfs2_bmap(ip, bh_map, lblock, create, 1); if (gfs2_is_dir(ip)) { bsize = sdp->sd_jbsize; arr = sdp->sd_jheightsize; } ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]); if (ret) goto out; height = ip->i_height; size = (lblock + 1) * bsize; while (size > arr[height]) height++; find_metapath(sdp, lblock, &mp, height); ret = 1; if (height > ip->i_height || gfs2_is_stuffed(ip)) goto do_alloc; ret = lookup_metapath(ip, &mp); if (ret < 0) goto out; if (ret != ip->i_height) goto do_alloc; ptr = metapointer(ip->i_height - 1, &mp); if (*ptr == 0) goto do_alloc; map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr)); bh = mp.mp_bh[ip->i_height - 
1]; len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob); bh_map->b_size = (len << inode->i_blkbits); if (eob) set_buffer_boundary(bh_map); ret = 0; out: release_metapath(&mp); trace_gfs2_bmap(ip, bh_map, lblock, create, ret); bmap_unlock(ip, create); return ret; do_alloc: /* All allocations are done here, firstly check create flag */ if (!create) { BUG_ON(gfs2_is_stuffed(ip)); ret = 0; goto out; } /* At this point ret is the tree depth of already allocated blocks */ ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen); goto out; } /* * Deprecated: do not use in new code */ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen) { struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 }; int ret; int create = *new; BUG_ON(!extlen); BUG_ON(!dblock); BUG_ON(!new); bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5)); ret = gfs2_block_map(inode, lblock, &bh, create); *extlen = bh.b_size >> inode->i_blkbits; *dblock = bh.b_blocknr; if (buffer_new(&bh)) *new = 1; else *new = 0; return ret; } /** * recursive_scan - recursively scan through the end of a file * @ip: the inode * @dibh: the dinode buffer * @mp: the path through the metadata to the point to start * @height: the height the recursion is at * @block: the indirect block to look at * @first: 1 if this is the first block * @bc: the call to make for each piece of metadata * @data: data opaque to this function to pass to @bc * * When this is first called @height and @block should be zero and * @first should be 1. 
* * Returns: errno */ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh, struct metapath *mp, unsigned int height, u64 block, int first, block_call_t bc, void *data) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *bh = NULL; __be64 *top, *bottom; u64 bn; int error; int mh_size = sizeof(struct gfs2_meta_header); if (!height) { error = gfs2_meta_inode_buffer(ip, &bh); if (error) return error; dibh = bh; top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; } else { error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh); if (error) return error; top = (__be64 *)(bh->b_data + mh_size) + (first ? mp->mp_list[height] : 0); bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs; } error = bc(ip, dibh, bh, top, bottom, height, data); if (error) goto out; if (height < ip->i_height - 1) for (; top < bottom; top++, first = 0) { if (!*top) continue; bn = be64_to_cpu(*top); error = recursive_scan(ip, dibh, mp, height + 1, bn, first, bc, data); if (error) break; } out: brelse(bh); return error; } /** * do_strip - Look for a layer a particular layer of the file and strip it off * @ip: the inode * @dibh: the dinode buffer * @bh: A buffer of pointers * @top: The first pointer in the buffer * @bottom: One more than the last pointer * @height: the height this buffer is at * @data: a pointer to a struct strip_mine * * Returns: errno */ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, struct buffer_head *bh, __be64 *top, __be64 *bottom, unsigned int height, void *data) { struct strip_mine *sm = data; struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrp_list rlist; u64 bn, bstart; u32 blen, btotal; __be64 *p; unsigned int rg_blocks = 0; int metadata; unsigned int revokes = 0; int x; int error = 0; if (!*top) sm->sm_first = 0; if (height != sm->sm_height) return 0; if (sm->sm_first) { top++; sm->sm_first = 
0; } metadata = (height != ip->i_height - 1); if (metadata) revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs; else if (ip->i_depth) revokes = sdp->sd_inptrs; if (ip != GFS2_I(sdp->sd_rindex)) error = gfs2_rindex_hold(sdp, &ip->i_alloc->al_ri_gh); else if (!sdp->sd_rgrps) error = gfs2_ri_update(ip); if (error) return error; memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); bstart = 0; blen = 0; for (p = top; p < bottom; p++) { if (!*p) continue; bn = be64_to_cpu(*p); if (bstart + blen == bn) blen++; else { if (bstart) gfs2_rlist_add(sdp, &rlist, bstart); bstart = bn; blen = 1; } } if (bstart) gfs2_rlist_add(sdp, &rlist, bstart); else goto out; /* Nothing to do */ gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE); for (x = 0; x < rlist.rl_rgrps; x++) { struct gfs2_rgrpd *rgd; rgd = rlist.rl_ghs[x].gh_gl->gl_object; rg_blocks += rgd->rd_length; } error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs); if (error) goto out_rlist; error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT + RES_STATFS + RES_QUOTA, revokes); if (error) goto out_rg_gunlock; down_write(&ip->i_rw_mutex); gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_trans_add_bh(ip->i_gl, bh, 1); bstart = 0; blen = 0; btotal = 0; for (p = top; p < bottom; p++) { if (!*p) continue; bn = be64_to_cpu(*p); if (bstart + blen == bn) blen++; else { if (bstart) { if (metadata) __gfs2_free_meta(ip, bstart, blen); else __gfs2_free_data(ip, bstart, blen); btotal += blen; } bstart = bn; blen = 1; } *p = 0; gfs2_add_inode_blocks(&ip->i_inode, -1); } if (bstart) { if (metadata) __gfs2_free_meta(ip, bstart, blen); else __gfs2_free_data(ip, bstart, blen); btotal += blen; } gfs2_statfs_change(sdp, 0, +btotal, 0); gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid, ip->i_inode.i_gid); ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(ip, dibh->b_data); up_write(&ip->i_rw_mutex); gfs2_trans_end(sdp); out_rg_gunlock: gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); out_rlist: gfs2_rlist_free(&rlist); 
out: if (ip != GFS2_I(sdp->sd_rindex)) gfs2_glock_dq_uninit(&ip->i_alloc->al_ri_gh); return error; } /** * gfs2_block_truncate_page - Deal with zeroing out data for truncate * * This is partly borrowed from ext3. */ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) { struct inode *inode = mapping->host; struct gfs2_inode *ip = GFS2_I(inode); unsigned long index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); unsigned blocksize, iblock, length, pos; struct buffer_head *bh; struct page *page; int err; page = grab_cache_page(mapping, index); if (!page) return 0; blocksize = inode->i_sb->s_blocksize; length = blocksize - (offset & (blocksize - 1)); iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); /* Find the buffer that contains "offset" */ bh = page_buffers(page); pos = blocksize; while (offset >= pos) { bh = bh->b_this_page; iblock++; pos += blocksize; } err = 0; if (!buffer_mapped(bh)) { gfs2_block_map(inode, iblock, bh, 0); /* unmapped? It's a hole - nothing to do */ if (!buffer_mapped(bh)) goto unlock; } /* Ok, it's mapped. Make sure it's up-to-date */ if (PageUptodate(page)) set_buffer_uptodate(bh); if (!buffer_uptodate(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; err = 0; } if (!gfs2_is_writeback(ip)) gfs2_trans_add_bh(ip->i_gl, bh, 0); zero_user(page, offset, length); mark_buffer_dirty(bh); unlock: unlock_page(page); page_cache_release(page); return err; } static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct address_space *mapping = inode->i_mapping; struct buffer_head *dibh; int journaled = gfs2_is_jdata(ip); int error; error = gfs2_trans_begin(sdp, RES_DINODE + (journaled ? 
RES_JDATA : 0), 0); if (error) return error; error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out; gfs2_trans_add_bh(ip->i_gl, dibh, 1); if (gfs2_is_stuffed(ip)) { gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize); } else { if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) { error = gfs2_block_truncate_page(mapping, newsize); if (error) goto out_brelse; } ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; } i_size_write(inode, newsize); ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(ip, dibh->b_data); truncate_pagecache(inode, oldsize, newsize); out_brelse: brelse(dibh); out: gfs2_trans_end(sdp); return error; } static int trunc_dealloc(struct gfs2_inode *ip, u64 size) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); unsigned int height = ip->i_height; u64 lblock; struct metapath mp; int error; if (!size) lblock = 0; else lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift; find_metapath(sdp, lblock, &mp, ip->i_height); if (!gfs2_alloc_get(ip)) return -ENOMEM; error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); if (error) goto out; while (height--) { struct strip_mine sm; sm.sm_first = !!size; sm.sm_height = height; error = recursive_scan(ip, NULL, &mp, 0, 0, 1, do_strip, &sm); if (error) break; } gfs2_quota_unhold(ip); out: gfs2_alloc_put(ip); return error; } static int trunc_end(struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *dibh; int error; error = gfs2_trans_begin(sdp, RES_DINODE, 0); if (error) return error; down_write(&ip->i_rw_mutex); error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out; if (!i_size_read(&ip->i_inode)) { ip->i_height = 0; ip->i_goal = ip->i_no_addr; gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); } ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); out: up_write(&ip->i_rw_mutex); 
gfs2_trans_end(sdp); return error; } /** * do_shrink - make a file smaller * @inode: the inode * @oldsize: the current inode size * @newsize: the size to make the file * * Called with an exclusive lock on @inode. The @size must * be equal to or smaller than the current inode size. * * Returns: errno */ static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize) { struct gfs2_inode *ip = GFS2_I(inode); int error; error = trunc_start(inode, oldsize, newsize); if (error < 0) return error; if (gfs2_is_stuffed(ip)) return 0; error = trunc_dealloc(ip, newsize); if (error == 0) error = trunc_end(ip); return error; } void gfs2_trim_blocks(struct inode *inode) { u64 size = inode->i_size; int ret; ret = do_shrink(inode, size, size); WARN_ON(ret != 0); } /** * do_grow - Touch and update inode size * @inode: The inode * @size: The new size * * This function updates the timestamps on the inode and * may also increase the size of the inode. This function * must not be called with @size any smaller than the current * inode size. * * Although it is not strictly required to unstuff files here, * earlier versions of GFS2 have a bug in the stuffed file reading * code which will result in a buffer overrun if the size is larger * than the max stuffed file size. In order to prevent this from * occurring, such files are unstuffed, but in other cases we can * just update the inode size directly. 
* * Returns: 0 on success, or -ve on error */ static int do_grow(struct inode *inode, u64 size) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct buffer_head *dibh; struct gfs2_alloc *al = NULL; int error; if (gfs2_is_stuffed(ip) && (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) { al = gfs2_alloc_get(ip); if (al == NULL) return -ENOMEM; error = gfs2_quota_lock_check(ip); if (error) goto do_grow_alloc_put; al->al_requested = 1; error = gfs2_inplace_reserve(ip); if (error) goto do_grow_qunlock; } error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0); if (error) goto do_grow_release; if (al) { error = gfs2_unstuff_dinode(ip, NULL); if (error) goto do_end_trans; } error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto do_end_trans; i_size_write(inode, size); ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); do_end_trans: gfs2_trans_end(sdp); do_grow_release: if (al) { gfs2_inplace_release(ip); do_grow_qunlock: gfs2_quota_unlock(ip); do_grow_alloc_put: gfs2_alloc_put(ip); } return error; } /** * gfs2_setattr_size - make a file a given size * @inode: the inode * @newsize: the size to make the file * * The file size can grow, shrink, or stay the same size. This * is called holding i_mutex and an exclusive glock on the inode * in question. 
* * Returns: errno */ int gfs2_setattr_size(struct inode *inode, u64 newsize) { int ret; u64 oldsize; BUG_ON(!S_ISREG(inode->i_mode)); ret = inode_newsize_ok(inode, newsize); if (ret) return ret; oldsize = inode->i_size; if (newsize >= oldsize) return do_grow(inode, newsize); return do_shrink(inode, oldsize, newsize); } int gfs2_truncatei_resume(struct gfs2_inode *ip) { int error; error = trunc_dealloc(ip, i_size_read(&ip->i_inode)); if (!error) error = trunc_end(ip); return error; } int gfs2_file_dealloc(struct gfs2_inode *ip) { return trunc_dealloc(ip, 0); } /** * gfs2_write_alloc_required - figure out if a write will require an allocation * @ip: the file being written to * @offset: the offset to write to * @len: the number of bytes being written * * Returns: 1 if an alloc is required, 0 otherwise */ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, unsigned int len) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head bh; unsigned int shift; u64 lblock, lblock_stop, size; u64 end_of_file; if (!len) return 0; if (gfs2_is_stuffed(ip)) { if (offset + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) return 1; return 0; } shift = sdp->sd_sb.sb_bsize_shift; BUG_ON(gfs2_is_dir(ip)); end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift; lblock = offset >> shift; lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; if (lblock_stop > end_of_file) return 1; size = (lblock_stop - lblock) << shift; do { bh.b_state = 0; bh.b_size = size; gfs2_block_map(&ip->i_inode, lblock, &bh, 0); if (!buffer_mapped(&bh)) return 1; size -= bh.b_size; lblock += (bh.b_size >> ip->i_inode.i_blkbits); } while(size > 0); return 0; }
gpl-2.0
roalex/sgs3-kernel
net/ieee802154/raw.c
4157
5552
/* * Raw IEEE 802.15.4 sockets * * Copyright 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> */ #include <linux/net.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/list.h> #include <linux/slab.h> #include <net/sock.h> #include <net/af_ieee802154.h> #include "af802154.h" static HLIST_HEAD(raw_head); static DEFINE_RWLOCK(raw_lock); static void raw_hash(struct sock *sk) { write_lock_bh(&raw_lock); sk_add_node(sk, &raw_head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); write_unlock_bh(&raw_lock); } static void raw_unhash(struct sock *sk) { write_lock_bh(&raw_lock); if (sk_del_node_init(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&raw_lock); } static void raw_close(struct sock *sk, long timeout) { sk_common_release(sk); } static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len) { struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr; int err = 0; struct net_device *dev = NULL; if (len < sizeof(*addr)) return -EINVAL; if (addr->family != AF_IEEE802154) return -EINVAL; lock_sock(sk); dev = ieee802154_get_dev(sock_net(sk), &addr->addr); if (!dev) { err = -ENODEV; goto out; } if (dev->type != ARPHRD_IEEE802154) { err = -ENODEV; goto out_put; } sk->sk_bound_dev_if = dev->ifindex; 
sk_dst_reset(sk); out_put: dev_put(dev); out: release_sock(sk); return err; } static int raw_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { return -ENOTSUPP; } static int raw_disconnect(struct sock *sk, int flags) { return 0; } static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct net_device *dev; unsigned mtu; struct sk_buff *skb; int err; if (msg->msg_flags & MSG_OOB) { pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags); return -EOPNOTSUPP; } lock_sock(sk); if (!sk->sk_bound_dev_if) dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); else dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if); release_sock(sk); if (!dev) { pr_debug("no dev\n"); err = -ENXIO; goto out; } mtu = dev->mtu; pr_debug("name = %s, mtu = %u\n", dev->name, mtu); if (size > mtu) { pr_debug("size = %Zu, mtu = %u\n", size, mtu); err = -EINVAL; goto out_dev; } skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_dev; skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_mac_header(skb); skb_reset_network_header(skb); err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); if (err < 0) goto out_skb; skb->dev = dev; skb->sk = sk; skb->protocol = htons(ETH_P_IEEE802154); dev_put(dev); err = dev_queue_xmit(skb); if (err > 0) err = net_xmit_errno(err); return err ?: size; out_skb: kfree_skb(skb); out_dev: dev_put(dev); out: return err; } static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); if (flags & MSG_TRUNC) copied = skb->len; done: 
skb_free_datagram(sk, skb); out: if (err) return err; return copied; } static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) { if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } return NET_RX_SUCCESS; } void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb) { struct sock *sk; struct hlist_node *node; read_lock(&raw_lock); sk_for_each(sk, node, &raw_head) { bh_lock_sock(sk); if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dev->ifindex) { struct sk_buff *clone; clone = skb_clone(skb, GFP_ATOMIC); if (clone) raw_rcv_skb(sk, clone); } bh_unlock_sock(sk); } read_unlock(&raw_lock); } static int raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { return -EOPNOTSUPP; } static int raw_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { return -EOPNOTSUPP; } struct proto ieee802154_raw_prot = { .name = "IEEE-802.15.4-RAW", .owner = THIS_MODULE, .obj_size = sizeof(struct sock), .close = raw_close, .bind = raw_bind, .sendmsg = raw_sendmsg, .recvmsg = raw_recvmsg, .hash = raw_hash, .unhash = raw_unhash, .connect = raw_connect, .disconnect = raw_disconnect, .getsockopt = raw_getsockopt, .setsockopt = raw_setsockopt, };
gpl-2.0