repo_name
string
path
string
copies
string
size
string
content
string
license
string
crdroid-devices/android_kernel_htc_msm8974
drivers/acpi/acpica/dsmethod.c
4919
21022
/****************************************************************************** * * Module Name: dsmethod - Parser/Interpreter interface - control method parsing * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" #ifdef ACPI_DISASSEMBLER #include <acpi/acdisasm.h> #endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsmethod") /* Local prototypes */ static acpi_status acpi_ds_create_method_mutex(union acpi_operand_object *method_desc); /******************************************************************************* * * FUNCTION: acpi_ds_method_error * * PARAMETERS: Status - Execution status * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Called on method error. Invoke the global exception handler if * present, dump the method data if the disassembler is configured * * Note: Allows the exception handler to change the status code * ******************************************************************************/ acpi_status acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state) { ACPI_FUNCTION_ENTRY(); /* Ignore AE_OK and control exception codes */ if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) { return (status); } /* Invoke the global exception handler */ if (acpi_gbl_exception_handler) { /* Exit the interpreter, allow handler to execute methods */ acpi_ex_exit_interpreter(); /* * Handler can map the exception code to anything it wants, including * AE_OK, in which case the executing method will not be aborted. */ status = acpi_gbl_exception_handler(status, walk_state->method_node ? 
walk_state->method_node-> name.integer : 0, walk_state->opcode, walk_state->aml_offset, NULL); acpi_ex_enter_interpreter(); } acpi_ds_clear_implicit_return(walk_state); #ifdef ACPI_DISASSEMBLER if (ACPI_FAILURE(status)) { /* Display method locals/args if disassembler is present */ acpi_dm_dump_method_info(status, walk_state, walk_state->op); } #endif return (status); } /******************************************************************************* * * FUNCTION: acpi_ds_create_method_mutex * * PARAMETERS: obj_desc - The method object * * RETURN: Status * * DESCRIPTION: Create a mutex object for a serialized control method * ******************************************************************************/ static acpi_status acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) { union acpi_operand_object *mutex_desc; acpi_status status; ACPI_FUNCTION_TRACE(ds_create_method_mutex); /* Create the new mutex object */ mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX); if (!mutex_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Create the actual OS Mutex */ status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } mutex_desc->mutex.sync_level = method_desc->method.sync_level; method_desc->method.mutex = mutex_desc; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ds_begin_method_execution * * PARAMETERS: method_node - Node of the method * obj_desc - The method object * walk_state - current state, NULL if not yet executing * a method. * * RETURN: Status * * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, * increments the thread count, and waits at the method semaphore * for clearance to execute. 
* ******************************************************************************/ acpi_status acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node); if (!method_node) { return_ACPI_STATUS(AE_NULL_ENTRY); } /* Prevent wraparound of thread count */ if (obj_desc->method.thread_count == ACPI_UINT8_MAX) { ACPI_ERROR((AE_INFO, "Method reached maximum reentrancy limit (255)")); return_ACPI_STATUS(AE_AML_METHOD_LIMIT); } /* * If this method is serialized, we need to acquire the method mutex. */ if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) { /* * Create a mutex for the method if it is defined to be Serialized * and a mutex has not already been created. We defer the mutex creation * until a method is actually executed, to minimize the object count */ if (!obj_desc->method.mutex) { status = acpi_ds_create_method_mutex(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } /* * The current_sync_level (per-thread) must be less than or equal to * the sync level of the method. This mechanism provides some * deadlock prevention * * Top-level method invocation has no walk state at this point */ if (walk_state && (walk_state->thread->current_sync_level > obj_desc->method.mutex->mutex.sync_level)) { ACPI_ERROR((AE_INFO, "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)", acpi_ut_get_node_name(method_node), walk_state->thread->current_sync_level)); return_ACPI_STATUS(AE_AML_MUTEX_ORDER); } /* * Obtain the method mutex if necessary. Do not acquire mutex for a * recursive call. */ if (!walk_state || !obj_desc->method.mutex->mutex.thread_id || (walk_state->thread->thread_id != obj_desc->method.mutex->mutex.thread_id)) { /* * Acquire the method mutex. 
This releases the interpreter if we * block (and reacquires it before it returns) */ status = acpi_ex_system_wait_mutex(obj_desc->method.mutex-> mutex.os_mutex, ACPI_WAIT_FOREVER); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Update the mutex and walk info and save the original sync_level */ if (walk_state) { obj_desc->method.mutex->mutex. original_sync_level = walk_state->thread->current_sync_level; obj_desc->method.mutex->mutex.thread_id = walk_state->thread->thread_id; walk_state->thread->current_sync_level = obj_desc->method.sync_level; } else { obj_desc->method.mutex->mutex. original_sync_level = obj_desc->method.mutex->mutex.sync_level; } } /* Always increase acquisition depth */ obj_desc->method.mutex->mutex.acquisition_depth++; } /* * Allocate an Owner ID for this method, only if this is the first thread * to begin concurrent execution. We only need one owner_id, even if the * method is invoked recursively. */ if (!obj_desc->method.owner_id) { status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); if (ACPI_FAILURE(status)) { goto cleanup; } } /* * Increment the method parse tree thread count since it has been * reentered one more time (even if it is the same thread) */ obj_desc->method.thread_count++; return_ACPI_STATUS(status); cleanup: /* On error, must release the method mutex (if present) */ if (obj_desc->method.mutex) { acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_call_control_method * * PARAMETERS: Thread - Info for this thread * this_walk_state - Current walk state * Op - Current Op to be walked * * RETURN: Status * * DESCRIPTION: Transfer execution to a called control method * ******************************************************************************/ acpi_status acpi_ds_call_control_method(struct acpi_thread_state *thread, struct acpi_walk_state *this_walk_state, union 
acpi_parse_object *op) { acpi_status status; struct acpi_namespace_node *method_node; struct acpi_walk_state *next_walk_state = NULL; union acpi_operand_object *obj_desc; struct acpi_evaluate_info *info; u32 i; ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Calling method %p, currentstate=%p\n", this_walk_state->prev_op, this_walk_state)); /* * Get the namespace entry for the control method we are about to call */ method_node = this_walk_state->method_call_node; if (!method_node) { return_ACPI_STATUS(AE_NULL_ENTRY); } obj_desc = acpi_ns_get_attached_object(method_node); if (!obj_desc) { return_ACPI_STATUS(AE_NULL_OBJECT); } /* Init for new method, possibly wait on method mutex */ status = acpi_ds_begin_method_execution(method_node, obj_desc, this_walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Begin method parse/execution. Create a new walk state */ next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc, thread); if (!next_walk_state) { status = AE_NO_MEMORY; goto cleanup; } /* * The resolved arguments were put on the previous walk state's operand * stack. Operands on the previous walk state stack always * start at index 0. Also, null terminate the list of arguments */ this_walk_state->operands[this_walk_state->num_operands] = NULL; /* * Allocate and initialize the evaluation information block * TBD: this is somewhat inefficient, should change interface to * ds_init_aml_walk. 
For now, keeps this struct off the CPU stack */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { return_ACPI_STATUS(AE_NO_MEMORY); } info->parameters = &this_walk_state->operands[0]; status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, obj_desc->method.aml_start, obj_desc->method.aml_length, info, ACPI_IMODE_EXECUTE); ACPI_FREE(info); if (ACPI_FAILURE(status)) { goto cleanup; } /* * Delete the operands on the previous walkstate operand stack * (they were copied to new objects) */ for (i = 0; i < obj_desc->method.param_count; i++) { acpi_ut_remove_reference(this_walk_state->operands[i]); this_walk_state->operands[i] = NULL; } /* Clear the operand stack */ this_walk_state->num_operands = 0; ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "**** Begin nested execution of [%4.4s] **** WalkState=%p\n", method_node->name.ascii, next_walk_state)); /* Invoke an internal method if necessary */ if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) { status = obj_desc->method.dispatch.implementation(next_walk_state); if (status == AE_OK) { status = AE_CTRL_TERMINATE; } } return_ACPI_STATUS(status); cleanup: /* On error, we must terminate the method properly */ acpi_ds_terminate_control_method(obj_desc, next_walk_state); if (next_walk_state) { acpi_ds_delete_walk_state(next_walk_state); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_restart_control_method * * PARAMETERS: walk_state - State for preempted method (caller) * return_desc - Return value from the called method * * RETURN: Status * * DESCRIPTION: Restart a method that was preempted by another (nested) method * invocation. Handle the return value (if any) from the callee. 
* ******************************************************************************/ acpi_status acpi_ds_restart_control_method(struct acpi_walk_state *walk_state, union acpi_operand_object *return_desc) { acpi_status status; int same_as_implicit_return; ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n", acpi_ut_get_node_name(walk_state->method_node), walk_state->method_call_op, return_desc)); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n", walk_state->return_used, walk_state->results, walk_state)); /* Did the called method return a value? */ if (return_desc) { /* Is the implicit return object the same as the return desc? */ same_as_implicit_return = (walk_state->implicit_return_obj == return_desc); /* Are we actually going to use the return value? */ if (walk_state->return_used) { /* Save the return value from the previous method */ status = acpi_ds_result_push(return_desc, walk_state); if (ACPI_FAILURE(status)) { acpi_ut_remove_reference(return_desc); return_ACPI_STATUS(status); } /* * Save as THIS method's return value in case it is returned * immediately to yet another method */ walk_state->return_desc = return_desc; } /* * The following code is the optional support for the so-called * "implicit return". Some AML code assumes that the last value of the * method is "implicitly" returned to the caller, in the absence of an * explicit return value. * * Just save the last result of the method as the return value. * * NOTE: this is optional because the ASL language does not actually * support this behavior. */ else if (!acpi_ds_do_implicit_return (return_desc, walk_state, FALSE) || same_as_implicit_return) { /* * Delete the return value if it will not be used by the * calling method or remove one reference if the explicit return * is the same as the implicit return value. 
*/ acpi_ut_remove_reference(return_desc); } } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ds_terminate_control_method * * PARAMETERS: method_desc - Method object * walk_state - State associated with the method * * RETURN: None * * DESCRIPTION: Terminate a control method. Delete everything that the method * created, delete all locals and arguments, and delete the parse * tree if requested. * * MUTEX: Interpreter is locked * ******************************************************************************/ void acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, struct acpi_walk_state *walk_state) { ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state); /* method_desc is required, walk_state is optional */ if (!method_desc) { return_VOID; } if (walk_state) { /* Delete all arguments and locals */ acpi_ds_method_data_delete_all(walk_state); /* * If method is serialized, release the mutex and restore the * current sync level for this thread */ if (method_desc->method.mutex) { /* Acquisition Depth handles recursive calls */ method_desc->method.mutex->mutex.acquisition_depth--; if (!method_desc->method.mutex->mutex.acquisition_depth) { walk_state->thread->current_sync_level = method_desc->method.mutex->mutex. original_sync_level; acpi_os_release_mutex(method_desc->method. mutex->mutex.os_mutex); method_desc->method.mutex->mutex.thread_id = 0; } } /* * Delete any namespace objects created anywhere within the * namespace by the execution of this method. Unless: * 1) This method is a module-level executable code method, in which * case we want make the objects permanent. * 2) There are other threads executing the method, in which case we * will wait until the last thread has completed. 
*/ if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) && (method_desc->method.thread_count == 1)) { /* Delete any direct children of (created by) this method */ acpi_ns_delete_namespace_subtree(walk_state-> method_node); /* * Delete any objects that were created by this method * elsewhere in the namespace (if any were created). * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the * deletion such that we don't have to perform an entire * namespace walk for every control method execution. */ if (method_desc->method. info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) { acpi_ns_delete_namespace_by_owner(method_desc-> method. owner_id); method_desc->method.info_flags &= ~ACPI_METHOD_MODIFIED_NAMESPACE; } } } /* Decrement the thread count on the method */ if (method_desc->method.thread_count) { method_desc->method.thread_count--; } else { ACPI_ERROR((AE_INFO, "Invalid zero thread count in method")); } /* Are there any other threads currently executing this method? */ if (method_desc->method.thread_count) { /* * Additional threads. Do not release the owner_id in this case, * we immediately reuse it for the next thread executing this method */ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "*** Completed execution of one thread, %u threads remaining\n", method_desc->method.thread_count)); } else { /* This is the only executing thread for this method */ /* * Support to dynamically change a method from not_serialized to * Serialized if it appears that the method is incorrectly written and * does not support multiple thread execution. The best example of this * is if such a method creates namespace objects and blocks. A second * thread will fail with an AE_ALREADY_EXISTS exception. * * This code is here because we must wait until the last thread exits * before marking the method as serialized. */ if (method_desc->method. 
info_flags & ACPI_METHOD_SERIALIZED_PENDING) { if (walk_state) { ACPI_INFO((AE_INFO, "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error", walk_state->method_node->name. ascii)); } /* * Method tried to create an object twice and was marked as * "pending serialized". The probable cause is that the method * cannot handle reentrancy. * * The method was created as not_serialized, but it tried to create * a named object and then blocked, causing the second thread * entrance to begin and then fail. Workaround this problem by * marking the method permanently as Serialized when the last * thread exits here. */ method_desc->method.info_flags &= ~ACPI_METHOD_SERIALIZED_PENDING; method_desc->method.info_flags |= ACPI_METHOD_SERIALIZED; method_desc->method.sync_level = 0; } /* No more threads, we can free the owner_id */ if (! (method_desc->method. info_flags & ACPI_METHOD_MODULE_LEVEL)) { acpi_ut_release_owner_id(&method_desc->method.owner_id); } } return_VOID; }
gpl-2.0
ariev7x/android_kernel_logan
drivers/acpi/acpica/utobject.c
4919
20341
/****************************************************************************** * * Module Name: utobject - ACPI object create/delete/size/cache routines * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utobject") /* Local prototypes */ static acpi_status acpi_ut_get_simple_object_size(union acpi_operand_object *obj, acpi_size * obj_length); static acpi_status acpi_ut_get_package_object_size(union acpi_operand_object *obj, acpi_size * obj_length); static acpi_status acpi_ut_get_element_length(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context); /******************************************************************************* * * FUNCTION: acpi_ut_create_internal_object_dbg * * PARAMETERS: module_name - Source file name of caller * line_number - Line number of caller * component_id - Component type of caller * Type - ACPI Type of the new object * * RETURN: A new internal object, null on failure * * DESCRIPTION: Create and initialize a new internal object. * * NOTE: We always allocate the worst-case object descriptor because * these objects are cached, and we want them to be * one-size-satisifies-any-request. This in itself may not be * the most memory efficient, but the efficiency of the object * cache should more than make up for this! 
* ******************************************************************************/ union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char *module_name, u32 line_number, u32 component_id, acpi_object_type type) { union acpi_operand_object *object; union acpi_operand_object *second_object; ACPI_FUNCTION_TRACE_STR(ut_create_internal_object_dbg, acpi_ut_get_type_name(type)); /* Allocate the raw object descriptor */ object = acpi_ut_allocate_object_desc_dbg(module_name, line_number, component_id); if (!object) { return_PTR(NULL); } switch (type) { case ACPI_TYPE_REGION: case ACPI_TYPE_BUFFER_FIELD: case ACPI_TYPE_LOCAL_BANK_FIELD: /* These types require a secondary object */ second_object = acpi_ut_allocate_object_desc_dbg(module_name, line_number, component_id); if (!second_object) { acpi_ut_delete_object_desc(object); return_PTR(NULL); } second_object->common.type = ACPI_TYPE_LOCAL_EXTRA; second_object->common.reference_count = 1; /* Link the second object to the first */ object->common.next_object = second_object; break; default: /* All others have no secondary object */ break; } /* Save the object type in the object descriptor */ object->common.type = (u8) type; /* Init the reference count */ object->common.reference_count = 1; /* Any per-type initialization should go here */ return_PTR(object); } /******************************************************************************* * * FUNCTION: acpi_ut_create_package_object * * PARAMETERS: Count - Number of package elements * * RETURN: Pointer to a new Package object, null on failure * * DESCRIPTION: Create a fully initialized package object * ******************************************************************************/ union acpi_operand_object *acpi_ut_create_package_object(u32 count) { union acpi_operand_object *package_desc; union acpi_operand_object **package_elements; ACPI_FUNCTION_TRACE_U32(ut_create_package_object, count); /* Create a new Package object */ package_desc = 
acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE); if (!package_desc) { return_PTR(NULL); } /* * Create the element array. Count+1 allows the array to be null * terminated. */ package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count + 1) * sizeof(void *)); if (!package_elements) { acpi_ut_remove_reference(package_desc); return_PTR(NULL); } package_desc->package.count = count; package_desc->package.elements = package_elements; return_PTR(package_desc); } /******************************************************************************* * * FUNCTION: acpi_ut_create_integer_object * * PARAMETERS: initial_value - Initial value for the integer * * RETURN: Pointer to a new Integer object, null on failure * * DESCRIPTION: Create an initialized integer object * ******************************************************************************/ union acpi_operand_object *acpi_ut_create_integer_object(u64 initial_value) { union acpi_operand_object *integer_desc; ACPI_FUNCTION_TRACE(ut_create_integer_object); /* Create and initialize a new integer object */ integer_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); if (!integer_desc) { return_PTR(NULL); } integer_desc->integer.value = initial_value; return_PTR(integer_desc); } /******************************************************************************* * * FUNCTION: acpi_ut_create_buffer_object * * PARAMETERS: buffer_size - Size of buffer to be created * * RETURN: Pointer to a new Buffer object, null on failure * * DESCRIPTION: Create a fully initialized buffer object * ******************************************************************************/ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size) { union acpi_operand_object *buffer_desc; u8 *buffer = NULL; ACPI_FUNCTION_TRACE_U32(ut_create_buffer_object, buffer_size); /* Create a new Buffer object */ buffer_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER); if (!buffer_desc) { return_PTR(NULL); } /* Create an actual buffer only if 
size > 0 */ if (buffer_size > 0) { /* Allocate the actual buffer */ buffer = ACPI_ALLOCATE_ZEROED(buffer_size); if (!buffer) { ACPI_ERROR((AE_INFO, "Could not allocate size %u", (u32) buffer_size)); acpi_ut_remove_reference(buffer_desc); return_PTR(NULL); } } /* Complete buffer object initialization */ buffer_desc->buffer.flags |= AOPOBJ_DATA_VALID; buffer_desc->buffer.pointer = buffer; buffer_desc->buffer.length = (u32) buffer_size; /* Return the new buffer descriptor */ return_PTR(buffer_desc); } /******************************************************************************* * * FUNCTION: acpi_ut_create_string_object * * PARAMETERS: string_size - Size of string to be created. Does not * include NULL terminator, this is added * automatically. * * RETURN: Pointer to a new String object * * DESCRIPTION: Create a fully initialized string object * ******************************************************************************/ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size) { union acpi_operand_object *string_desc; char *string; ACPI_FUNCTION_TRACE_U32(ut_create_string_object, string_size); /* Create a new String object */ string_desc = acpi_ut_create_internal_object(ACPI_TYPE_STRING); if (!string_desc) { return_PTR(NULL); } /* * Allocate the actual string buffer -- (Size + 1) for NULL terminator. 
* NOTE: Zero-length strings are NULL terminated */ string = ACPI_ALLOCATE_ZEROED(string_size + 1); if (!string) { ACPI_ERROR((AE_INFO, "Could not allocate size %u", (u32) string_size)); acpi_ut_remove_reference(string_desc); return_PTR(NULL); } /* Complete string object initialization */ string_desc->string.pointer = string; string_desc->string.length = (u32) string_size; /* Return the new string descriptor */ return_PTR(string_desc); } /******************************************************************************* * * FUNCTION: acpi_ut_valid_internal_object * * PARAMETERS: Object - Object to be validated * * RETURN: TRUE if object is valid, FALSE otherwise * * DESCRIPTION: Validate a pointer to be a union acpi_operand_object * ******************************************************************************/ u8 acpi_ut_valid_internal_object(void *object) { ACPI_FUNCTION_NAME(ut_valid_internal_object); /* Check for a null pointer */ if (!object) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "**** Null Object Ptr\n")); return (FALSE); } /* Check the descriptor type field */ switch (ACPI_GET_DESCRIPTOR_TYPE(object)) { case ACPI_DESC_TYPE_OPERAND: /* The object appears to be a valid union acpi_operand_object */ return (TRUE); default: ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p is not not an ACPI operand obj [%s]\n", object, acpi_ut_get_descriptor_name(object))); break; } return (FALSE); } /******************************************************************************* * * FUNCTION: acpi_ut_allocate_object_desc_dbg * * PARAMETERS: module_name - Caller's module name (for error output) * line_number - Caller's line number (for error output) * component_id - Caller's component ID (for error output) * * RETURN: Pointer to newly allocated object descriptor. Null on error * * DESCRIPTION: Allocate a new object descriptor. Gracefully handle * error conditions. 
* ******************************************************************************/ void *acpi_ut_allocate_object_desc_dbg(const char *module_name, u32 line_number, u32 component_id) { union acpi_operand_object *object; ACPI_FUNCTION_TRACE(ut_allocate_object_desc_dbg); object = acpi_os_acquire_object(acpi_gbl_operand_cache); if (!object) { ACPI_ERROR((module_name, line_number, "Could not allocate an object descriptor")); return_PTR(NULL); } /* Mark the descriptor type */ memset(object, 0, sizeof(union acpi_operand_object)); ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND); ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p Size %X\n", object, (u32) sizeof(union acpi_operand_object))); return_PTR(object); } /******************************************************************************* * * FUNCTION: acpi_ut_delete_object_desc * * PARAMETERS: Object - An Acpi internal object to be deleted * * RETURN: None. * * DESCRIPTION: Free an ACPI object descriptor or add it to the object cache * ******************************************************************************/ void acpi_ut_delete_object_desc(union acpi_operand_object *object) { ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object); /* Object must be a union acpi_operand_object */ if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) { ACPI_ERROR((AE_INFO, "%p is not an ACPI Operand object [%s]", object, acpi_ut_get_descriptor_name(object))); return_VOID; } (void)acpi_os_release_object(acpi_gbl_operand_cache, object); return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ut_get_simple_object_size * * PARAMETERS: internal_object - An ACPI operand object * obj_length - Where the length is returned * * RETURN: Status * * DESCRIPTION: This function is called to determine the space required to * contain a simple object for return to an external user. * * The length includes the object structure plus any additional * needed space. 
* ******************************************************************************/

static acpi_status
acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
			       acpi_size * obj_length)
{
	acpi_size length;
	acpi_size size;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);

	/*
	 * A NULL object is legal here -- it can be an uninitialized package
	 * element. It still requires one union acpi_object slot.
	 */
	if (!internal_object) {
		*obj_length = sizeof(union acpi_object);
		return_ACPI_STATUS(AE_OK);
	}

	/* Every conversion starts with the base acpi_object structure */

	length = sizeof(union acpi_object);

	if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {

		/* Object is a named object (reference), just return the length */

		*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
		return_ACPI_STATUS(status);
	}

	/*
	 * The final length depends on the object type
	 * Strings and Buffers are packed right up against the parent object and
	 * must be accessed bytewise or there may be alignment problems on
	 * certain processors
	 */
	switch (internal_object->common.type) {
	case ACPI_TYPE_STRING:

		/* String data plus the NUL terminator */

		length += (acpi_size) internal_object->string.length + 1;
		break;

	case ACPI_TYPE_BUFFER:

		length += (acpi_size) internal_object->buffer.length;
		break;

	case ACPI_TYPE_INTEGER:
	case ACPI_TYPE_PROCESSOR:
	case ACPI_TYPE_POWER:

		/* No extra data for these types */
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:

		switch (internal_object->reference.class) {
		case ACPI_REFCLASS_NAME:
			/*
			 * Get the actual length of the full pathname to this object.
			 * The reference will be converted to the pathname to the object
			 */
			size =
			    acpi_ns_get_pathname_length(internal_object->
							reference.node);
			if (!size) {
				return_ACPI_STATUS(AE_BAD_PARAMETER);
			}

			length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
			break;

		default:
			/*
			 * No other reference opcodes are supported.
			 * Notably, Locals and Args are not supported, but this may be
			 * required eventually.
			 */
			ACPI_ERROR((AE_INFO,
				    "Cannot convert to external object - "
				    "unsupported Reference Class [%s] 0x%X in object %p",
				    acpi_ut_get_reference_name(internal_object),
				    internal_object->reference.class,
				    internal_object));
			status = AE_TYPE;
			break;
		}
		break;

	default:

		ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
			    "unsupported type [%s] 0x%X in object %p",
			    acpi_ut_get_object_type_name(internal_object),
			    internal_object->common.type, internal_object));
		status = AE_TYPE;
		break;
	}

	/*
	 * Round the total up to the next machine word so that each returned
	 * object lands on a word boundary (prevents alignment faults on some
	 * machines).
	 */
	*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_element_length
 *
 * PARAMETERS:  acpi_pkg_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get the length of one package element.
* ******************************************************************************/

static acpi_status
acpi_ut_get_element_length(u8 object_type,
			   union acpi_operand_object *source_object,
			   union acpi_generic_state *state, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
	acpi_size object_space;

	switch (object_type) {
	case ACPI_COPY_TYPE_SIMPLE:
		/*
		 * Simple object - just get the size (Null object/entry is handled
		 * here also) and sum it into the running package length
		 */
		status =
		    acpi_ut_get_simple_object_size(source_object,
						   &object_space);
		if (ACPI_FAILURE(status)) {
			return (status);
		}

		info->length += object_space;
		break;

	case ACPI_COPY_TYPE_PACKAGE:

		/* Package object - nothing much to do here, let the walk handle it */

		info->num_packages++;
		state->pkg.this_target_obj = NULL;
		break;

	default:

		/* No other types allowed */

		return (AE_BAD_PARAMETER);
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_package_object_size
 *
 * PARAMETERS:  internal_object     - An ACPI internal object
 *              obj_length          - Where the length is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to determine the space required to
 *              contain a package object for return to an external user.
 *
 *              This is moderately complex since a package contains other
 *              objects including packages.
* ******************************************************************************/

static acpi_status
acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
				acpi_size * obj_length)
{
	acpi_status status;
	struct acpi_pkg_info info;

	ACPI_FUNCTION_TRACE_PTR(ut_get_package_object_size, internal_object);

	/* Walk the entire package tree, summing element sizes into info */

	info.length = 0;
	info.object_space = 0;
	info.num_packages = 1;

	status = acpi_ut_walk_package_tree(internal_object, NULL,
					   acpi_ut_get_element_length, &info);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * All objects at all nesting levels are now accounted for; add the
	 * space consumed by the package objects themselves, each rounded up
	 * to the next machine word.
	 */
	info.length +=
	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) *
	    (acpi_size) info.num_packages;

	/* Return the total package length */

	*obj_length = info.length;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_object_size
 *
 * PARAMETERS:  internal_object     - An ACPI internal object
 *              obj_length          - Where the length will be returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to determine the space required to
 *              contain an object for return to an API user.
 *
 ******************************************************************************/

acpi_status
acpi_ut_get_object_size(union acpi_operand_object *internal_object,
			acpi_size * obj_length)
{
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	/* Packages need the recursive walk; everything else is simple */

	if ((ACPI_GET_DESCRIPTOR_TYPE(internal_object) ==
	     ACPI_DESC_TYPE_OPERAND)
	    && (internal_object->common.type == ACPI_TYPE_PACKAGE)) {
		status =
		    acpi_ut_get_package_object_size(internal_object,
						    obj_length);
	} else {
		status =
		    acpi_ut_get_simple_object_size(internal_object, obj_length);
	}

	return (status);
}
gpl-2.0
shinkumara/royss_shinkumara_kernel
drivers/acpi/acpica/uteval.c
4919
10045
/****************************************************************************** * * Module Name: uteval - Object evaluation * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("uteval") /******************************************************************************* * * FUNCTION: acpi_ut_evaluate_object * * PARAMETERS: prefix_node - Starting node * Path - Path to object from starting node * expected_return_types - Bitmap of allowed return types * return_desc - Where a return value is stored * * RETURN: Status * * DESCRIPTION: Evaluates a namespace object and verifies the type of the * return object. Common code that simplifies accessing objects * that have required return objects of fixed types. 
* * NOTE: Internal function, no parameter validation * ******************************************************************************/ acpi_status acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, char *path, u32 expected_return_btypes, union acpi_operand_object **return_desc) { struct acpi_evaluate_info *info; acpi_status status; u32 return_btype; ACPI_FUNCTION_TRACE(ut_evaluate_object); /* Allocate the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { return_ACPI_STATUS(AE_NO_MEMORY); } info->prefix_node = prefix_node; info->pathname = path; /* Evaluate the object/method */ status = acpi_ns_evaluate(info); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s.%s] was not found\n", acpi_ut_get_node_name(prefix_node), path)); } else { ACPI_ERROR_METHOD("Method execution failed", prefix_node, path, status); } goto cleanup; } /* Did we get a return object? */ if (!info->return_object) { if (expected_return_btypes) { ACPI_ERROR_METHOD("No object was returned from", prefix_node, path, AE_NOT_EXIST); status = AE_NOT_EXIST; } goto cleanup; } /* Map the return object type to the bitmapped type */ switch ((info->return_object)->common.type) { case ACPI_TYPE_INTEGER: return_btype = ACPI_BTYPE_INTEGER; break; case ACPI_TYPE_BUFFER: return_btype = ACPI_BTYPE_BUFFER; break; case ACPI_TYPE_STRING: return_btype = ACPI_BTYPE_STRING; break; case ACPI_TYPE_PACKAGE: return_btype = ACPI_BTYPE_PACKAGE; break; default: return_btype = 0; break; } if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) { /* * We received a return object, but one was not expected. This can * happen frequently if the "implicit return" feature is enabled. * Just delete the return object and return AE_OK. */ acpi_ut_remove_reference(info->return_object); goto cleanup; } /* Is the return object one of the expected types? 
*/ if (!(expected_return_btypes & return_btype)) { ACPI_ERROR_METHOD("Return object type is incorrect", prefix_node, path, AE_TYPE); ACPI_ERROR((AE_INFO, "Type returned from %s was incorrect: %s, expected Btypes: 0x%X", path, acpi_ut_get_object_type_name(info->return_object), expected_return_btypes)); /* On error exit, we must delete the return object */ acpi_ut_remove_reference(info->return_object); status = AE_TYPE; goto cleanup; } /* Object type is OK, return it */ *return_desc = info->return_object; cleanup: ACPI_FREE(info); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_evaluate_numeric_object * * PARAMETERS: object_name - Object name to be evaluated * device_node - Node for the device * Value - Where the value is returned * * RETURN: Status * * DESCRIPTION: Evaluates a numeric namespace object for a selected device * and stores result in *Value. * * NOTE: Internal function, no parameter validation * ******************************************************************************/ acpi_status acpi_ut_evaluate_numeric_object(char *object_name, struct acpi_namespace_node *device_node, u64 *value) { union acpi_operand_object *obj_desc; acpi_status status; ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object); status = acpi_ut_evaluate_object(device_node, object_name, ACPI_BTYPE_INTEGER, &obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Get the returned Integer */ *value = obj_desc->integer.value; /* On exit, we must delete the return object */ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_execute_STA * * PARAMETERS: device_node - Node for the device * Flags - Where the status flags are returned * * RETURN: Status * * DESCRIPTION: Executes _STA for selected device and stores results in * *Flags. 
*
 * NOTE: Internal function, no parameter validation
 *
 ******************************************************************************/

acpi_status
acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
{
	union acpi_operand_object *obj_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_execute_STA);

	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA,
					 ACPI_BTYPE_INTEGER, &obj_desc);
	if (ACPI_FAILURE(status)) {
		if (AE_NOT_FOUND == status) {

			/* A missing _STA means "device present" by convention */

			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
					  "_STA on %4.4s was not found, assuming device is present\n",
					  acpi_ut_get_node_name(device_node)));

			*flags = ACPI_UINT32_MAX;
			status = AE_OK;
		}

		return_ACPI_STATUS(status);
	}

	/* Extract the status flags */

	*flags = (u32) obj_desc->integer.value;

	/* On exit, we must delete the return object */

	acpi_ut_remove_reference(obj_desc);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_execute_power_methods
 *
 * PARAMETERS:  device_node         - Node for the device
 *              method_names        - Array of power method names
 *              method_count        - Number of methods to execute
 *              out_values          - Where the power method values are returned
 *
 * RETURN:      Status, out_values
 *
 * DESCRIPTION: Executes the specified power methods for the device and returns
 *              the result(s).
 *
 * NOTE: Internal function, no parameter validation
 *
 ******************************************************************************/

acpi_status
acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
			      const char **method_names,
			      u8 method_count, u8 *out_values)
{
	union acpi_operand_object *obj_desc;
	acpi_status status;
	acpi_status final_status = AE_NOT_FOUND;
	u32 i;

	ACPI_FUNCTION_TRACE(ut_execute_power_methods);

	for (i = 0; i < method_count; i++) {
		/*
		 * Execute the power method (_sx_d or _sx_w). The only allowable
		 * return type is an Integer.
		 */
		status = acpi_ut_evaluate_object(device_node,
						 ACPI_CAST_PTR(char,
							       method_names[i]),
						 ACPI_BTYPE_INTEGER, &obj_desc);
		if (ACPI_SUCCESS(status)) {
			out_values[i] = (u8)obj_desc->integer.value;

			/* Delete the return object */

			acpi_ut_remove_reference(obj_desc);
			final_status = AE_OK;	/* At least one value is valid */
			continue;
		}

		out_values[i] = ACPI_UINT8_MAX;
		if (status == AE_NOT_FOUND) {
			continue;	/* Ignore if not found */
		}

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Failed %s on Device %4.4s, %s\n",
				  ACPI_CAST_PTR(char, method_names[i]),
				  acpi_ut_get_node_name(device_node),
				  acpi_format_exception(status)));
	}

	return_ACPI_STATUS(final_status);
}
gpl-2.0
iAMr00t/android_kernel_huawei_msm8928
drivers/acpi/acpica/utcopy.c
4919
28561
/****************************************************************************** * * Module Name: utcopy - Internal to external object translation utilities * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utcopy") /* Local prototypes */ static acpi_status acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object, union acpi_object *external_object, u8 * data_space, acpi_size * buffer_space_used); static acpi_status acpi_ut_copy_ielement_to_ielement(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context); static acpi_status acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object, u8 * buffer, acpi_size * space_used); static acpi_status acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj, union acpi_operand_object **return_obj); static acpi_status acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, union acpi_operand_object **internal_object); static acpi_status acpi_ut_copy_simple_object(union acpi_operand_object *source_desc, union acpi_operand_object *dest_desc); static acpi_status acpi_ut_copy_ielement_to_eelement(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context); static acpi_status acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, union acpi_operand_object *dest_obj, struct acpi_walk_state *walk_state); /******************************************************************************* * * FUNCTION: acpi_ut_copy_isimple_to_esimple * * PARAMETERS: internal_object - 
Source object to be copied * external_object - Where to return the copied object * data_space - Where object data is returned (such as * buffer and string data) * buffer_space_used - Length of data_space that was used * * RETURN: Status * * DESCRIPTION: This function is called to copy a simple internal object to * an external object. * * The data_space buffer is assumed to have sufficient space for * the object. * ******************************************************************************/ static acpi_status acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object, union acpi_object *external_object, u8 * data_space, acpi_size * buffer_space_used) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple); *buffer_space_used = 0; /* * Check for NULL object case (could be an uninitialized * package element) */ if (!internal_object) { return_ACPI_STATUS(AE_OK); } /* Always clear the external object */ ACPI_MEMSET(external_object, 0, sizeof(union acpi_object)); /* * In general, the external object will be the same type as * the internal object */ external_object->type = internal_object->common.type; /* However, only a limited number of external types are supported */ switch (internal_object->common.type) { case ACPI_TYPE_STRING: external_object->string.pointer = (char *)data_space; external_object->string.length = internal_object->string.length; *buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size) internal_object-> string. length + 1); ACPI_MEMCPY((void *)data_space, (void *)internal_object->string.pointer, (acpi_size) internal_object->string.length + 1); break; case ACPI_TYPE_BUFFER: external_object->buffer.pointer = data_space; external_object->buffer.length = internal_object->buffer.length; *buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string. 
length); ACPI_MEMCPY((void *)data_space, (void *)internal_object->buffer.pointer, internal_object->buffer.length); break; case ACPI_TYPE_INTEGER: external_object->integer.value = internal_object->integer.value; break; case ACPI_TYPE_LOCAL_REFERENCE: /* This is an object reference. */ switch (internal_object->reference.class) { case ACPI_REFCLASS_NAME: /* * For namepath, return the object handle ("reference") * We are referring to the namespace node */ external_object->reference.handle = internal_object->reference.node; external_object->reference.actual_type = acpi_ns_get_type(internal_object->reference.node); break; default: /* All other reference types are unsupported */ return_ACPI_STATUS(AE_TYPE); } break; case ACPI_TYPE_PROCESSOR: external_object->processor.proc_id = internal_object->processor.proc_id; external_object->processor.pblk_address = internal_object->processor.address; external_object->processor.pblk_length = internal_object->processor.length; break; case ACPI_TYPE_POWER: external_object->power_resource.system_level = internal_object->power_resource.system_level; external_object->power_resource.resource_order = internal_object->power_resource.resource_order; break; default: /* * There is no corresponding external object type */ ACPI_ERROR((AE_INFO, "Unsupported object type, cannot convert to external object: %s", acpi_ut_get_type_name(internal_object->common. 
type))); return_ACPI_STATUS(AE_SUPPORT); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_ielement_to_eelement * * PARAMETERS: acpi_pkg_callback * * RETURN: Status * * DESCRIPTION: Copy one package element to another package element * ******************************************************************************/ static acpi_status acpi_ut_copy_ielement_to_eelement(u8 object_type, union acpi_operand_object *source_object, union acpi_generic_state *state, void *context) { acpi_status status = AE_OK; struct acpi_pkg_info *info = (struct acpi_pkg_info *)context; acpi_size object_space; u32 this_index; union acpi_object *target_object; ACPI_FUNCTION_ENTRY(); this_index = state->pkg.index; target_object = (union acpi_object *) &((union acpi_object *)(state->pkg.dest_object))->package. elements[this_index]; switch (object_type) { case ACPI_COPY_TYPE_SIMPLE: /* * This is a simple or null object */ status = acpi_ut_copy_isimple_to_esimple(source_object, target_object, info->free_space, &object_space); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_COPY_TYPE_PACKAGE: /* * Build the package object */ target_object->type = ACPI_TYPE_PACKAGE; target_object->package.count = source_object->package.count; target_object->package.elements = ACPI_CAST_PTR(union acpi_object, info->free_space); /* * Pass the new package object back to the package walk routine */ state->pkg.this_target_obj = target_object; /* * Save space for the array of objects (Package elements) * update the buffer length counter */ object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size) target_object-> package.count * sizeof(union acpi_object)); break; default: return (AE_BAD_PARAMETER); } info->free_space += object_space; info->length += object_space; return (status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_ipackage_to_epackage * * 
PARAMETERS: internal_object - Pointer to the object we are returning * Buffer - Where the object is returned * space_used - Where the object length is returned * * RETURN: Status * * DESCRIPTION: This function is called to place a package object in a user * buffer. A package object by definition contains other objects. * * The buffer is assumed to have sufficient space for the object. * The caller must have verified the buffer length needed using * the acpi_ut_get_object_size function before calling this function. * ******************************************************************************/ static acpi_status acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object, u8 * buffer, acpi_size * space_used) { union acpi_object *external_object; acpi_status status; struct acpi_pkg_info info; ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage); /* * First package at head of the buffer */ external_object = ACPI_CAST_PTR(union acpi_object, buffer); /* * Free space begins right after the first package */ info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); info.free_space = buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); info.object_space = 0; info.num_packages = 1; external_object->type = internal_object->common.type; external_object->package.count = internal_object->package.count; external_object->package.elements = ACPI_CAST_PTR(union acpi_object, info.free_space); /* * Leave room for an array of ACPI_OBJECTS in the buffer * and move the free space past it */ info.length += (acpi_size) external_object->package.count * ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); info.free_space += external_object->package.count * ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)); status = acpi_ut_walk_package_tree(internal_object, external_object, acpi_ut_copy_ielement_to_eelement, &info); *space_used = info.length; return_ACPI_STATUS(status); } 
/******************************************************************************* * * FUNCTION: acpi_ut_copy_iobject_to_eobject * * PARAMETERS: internal_object - The internal object to be converted * ret_buffer - Where the object is returned * * RETURN: Status * * DESCRIPTION: This function is called to build an API object to be returned * to the caller. * ******************************************************************************/ acpi_status acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object, struct acpi_buffer *ret_buffer) { acpi_status status; ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject); if (internal_object->common.type == ACPI_TYPE_PACKAGE) { /* * Package object: Copy all subobjects (including * nested packages) */ status = acpi_ut_copy_ipackage_to_epackage(internal_object, ret_buffer->pointer, &ret_buffer->length); } else { /* * Build a simple object (no nested objects) */ status = acpi_ut_copy_isimple_to_esimple(internal_object, ACPI_CAST_PTR(union acpi_object, ret_buffer-> pointer), ACPI_ADD_PTR(u8, ret_buffer-> pointer, ACPI_ROUND_UP_TO_NATIVE_WORD (sizeof (union acpi_object))), &ret_buffer->length); /* * build simple does not include the object size in the length * so we add it in here */ ret_buffer->length += sizeof(union acpi_object); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_esimple_to_isimple * * PARAMETERS: external_object - The external object to be converted * ret_internal_object - Where the internal object is returned * * RETURN: Status * * DESCRIPTION: This function copies an external object to an internal one. * NOTE: Pointers can be copied, we don't need to copy data. * (The pointers have to be valid in our address space no matter * what we do with them!) 
* ******************************************************************************/ static acpi_status acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object, union acpi_operand_object **ret_internal_object) { union acpi_operand_object *internal_object; ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple); /* * Simple types supported are: String, Buffer, Integer */ switch (external_object->type) { case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: case ACPI_TYPE_INTEGER: case ACPI_TYPE_LOCAL_REFERENCE: internal_object = acpi_ut_create_internal_object((u8) external_object-> type); if (!internal_object) { return_ACPI_STATUS(AE_NO_MEMORY); } break; case ACPI_TYPE_ANY: /* This is the case for a NULL object */ *ret_internal_object = NULL; return_ACPI_STATUS(AE_OK); default: /* All other types are not supported */ ACPI_ERROR((AE_INFO, "Unsupported object type, cannot convert to internal object: %s", acpi_ut_get_type_name(external_object->type))); return_ACPI_STATUS(AE_SUPPORT); } /* Must COPY string and buffer contents */ switch (external_object->type) { case ACPI_TYPE_STRING: internal_object->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size) external_object->string.length + 1); if (!internal_object->string.pointer) { goto error_exit; } ACPI_MEMCPY(internal_object->string.pointer, external_object->string.pointer, external_object->string.length); internal_object->string.length = external_object->string.length; break; case ACPI_TYPE_BUFFER: internal_object->buffer.pointer = ACPI_ALLOCATE_ZEROED(external_object->buffer.length); if (!internal_object->buffer.pointer) { goto error_exit; } ACPI_MEMCPY(internal_object->buffer.pointer, external_object->buffer.pointer, external_object->buffer.length); internal_object->buffer.length = external_object->buffer.length; /* Mark buffer data valid */ internal_object->buffer.flags |= AOPOBJ_DATA_VALID; break; case ACPI_TYPE_INTEGER: internal_object->integer.value = external_object->integer.value; break; case ACPI_TYPE_LOCAL_REFERENCE: /* 
TBD: should validate incoming handle */ internal_object->reference.class = ACPI_REFCLASS_NAME; internal_object->reference.node = external_object->reference.handle; break; default: /* Other types can't get here */ break; } *ret_internal_object = internal_object; return_ACPI_STATUS(AE_OK); error_exit: acpi_ut_remove_reference(internal_object); return_ACPI_STATUS(AE_NO_MEMORY); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_epackage_to_ipackage * * PARAMETERS: external_object - The external object to be converted * internal_object - Where the internal object is returned * * RETURN: Status * * DESCRIPTION: Copy an external package object to an internal package. * Handles nested packages. * ******************************************************************************/ static acpi_status acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object, union acpi_operand_object **internal_object) { acpi_status status = AE_OK; union acpi_operand_object *package_object; union acpi_operand_object **package_elements; u32 i; ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage); /* Create the package object */ package_object = acpi_ut_create_package_object(external_object->package.count); if (!package_object) { return_ACPI_STATUS(AE_NO_MEMORY); } package_elements = package_object->package.elements; /* * Recursive implementation. Probably ok, since nested external packages * as parameters should be very rare. */ for (i = 0; i < external_object->package.count; i++) { status = acpi_ut_copy_eobject_to_iobject(&external_object->package. 
elements[i], &package_elements[i]); if (ACPI_FAILURE(status)) { /* Truncate package and delete it */ package_object->package.count = i; package_elements[i] = NULL; acpi_ut_remove_reference(package_object); return_ACPI_STATUS(status); } } /* Mark package data valid */ package_object->package.flags |= AOPOBJ_DATA_VALID; *internal_object = package_object; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_eobject_to_iobject * * PARAMETERS: external_object - The external object to be converted * internal_object - Where the internal object is returned * * RETURN: Status * * DESCRIPTION: Converts an external object to an internal object. * ******************************************************************************/ acpi_status acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object, union acpi_operand_object **internal_object) { acpi_status status; ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject); if (external_object->type == ACPI_TYPE_PACKAGE) { status = acpi_ut_copy_epackage_to_ipackage(external_object, internal_object); } else { /* * Build a simple object (no nested objects) */ status = acpi_ut_copy_esimple_to_isimple(external_object, internal_object); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_copy_simple_object * * PARAMETERS: source_desc - The internal object to be copied * dest_desc - New target object * * RETURN: Status * * DESCRIPTION: Simple copy of one internal object to another. Reference count * of the destination object is preserved. 
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
			   union acpi_operand_object *dest_desc)
{
	u16 reference_count;
	union acpi_operand_object *next_object;
	acpi_status status;
	acpi_size copy_size;

	/* Save fields from destination that we don't want to overwrite */

	reference_count = dest_desc->common.reference_count;
	next_object = dest_desc->common.next_object;

	/*
	 * Copy the entire source object over the destination object.
	 * Note: Source can be either an operand object or namespace node.
	 */
	copy_size = sizeof(union acpi_operand_object);
	if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
		copy_size = sizeof(struct acpi_namespace_node);
	}

	ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
		    ACPI_CAST_PTR(char, source_desc), copy_size);

	/* Restore the saved fields */

	dest_desc->common.reference_count = reference_count;
	dest_desc->common.next_object = next_object;

	/* New object is not static, regardless of source */

	dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;

	/* Handle the objects with extra data */

	switch (dest_desc->common.type) {
	case ACPI_TYPE_BUFFER:
		/*
		 * Allocate and copy the actual buffer if and only if:
		 * 1) There is a valid buffer pointer
		 * 2) The buffer has a length > 0
		 */
		if ((source_desc->buffer.pointer) &&
		    (source_desc->buffer.length)) {
			dest_desc->buffer.pointer =
			    ACPI_ALLOCATE(source_desc->buffer.length);
			if (!dest_desc->buffer.pointer) {
				return (AE_NO_MEMORY);
			}

			/* Copy the actual buffer data */

			ACPI_MEMCPY(dest_desc->buffer.pointer,
				    source_desc->buffer.pointer,
				    source_desc->buffer.length);
		}
		break;

	case ACPI_TYPE_STRING:
		/*
		 * Allocate and copy the actual string if and only if:
		 * 1) There is a valid string pointer
		 * (Pointer to a NULL string is allowed)
		 */
		if (source_desc->string.pointer) {
			/* +1 allocates room for the NUL terminator */

			dest_desc->string.pointer =
			    ACPI_ALLOCATE((acpi_size) source_desc->string.
					  length + 1);
			if (!dest_desc->string.pointer) {
				return (AE_NO_MEMORY);
			}

			/* Copy the actual string data */

			ACPI_MEMCPY(dest_desc->string.pointer,
				    source_desc->string.pointer,
				    (acpi_size) source_desc->string.length + 1);
		}
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:
		/*
		 * We copied the reference object, so we now must add a reference
		 * to the object pointed to by the reference
		 *
		 * DDBHandle reference (from Load/load_table) is a special reference,
		 * it does not have a Reference.Object, so does not need to
		 * increase the reference count
		 */
		if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
			break;
		}

		acpi_ut_add_reference(source_desc->reference.object);
		break;

	case ACPI_TYPE_REGION:
		/*
		 * We copied the Region Handler, so we now must add a reference
		 */
		if (dest_desc->region.handler) {
			acpi_ut_add_reference(dest_desc->region.handler);
		}
		break;

		/*
		 * For Mutex and Event objects, we cannot simply copy the underlying
		 * OS object. We must create a new one.
		 */
	case ACPI_TYPE_MUTEX:

		status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
		if (ACPI_FAILURE(status)) {
			return status;
		}
		break;

	case ACPI_TYPE_EVENT:

		status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
						  &dest_desc->event.
						  os_semaphore);
		if (ACPI_FAILURE(status)) {
			return status;
		}
		break;

	default:
		/* Nothing to do for other simple objects */
		break;
	}

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_ielement_to_ielement
 *
 * PARAMETERS:  acpi_pkg_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy one package element to another package element. Invoked
 *              as a callback during the package tree walk.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
				  union acpi_operand_object *source_object,
				  union acpi_generic_state *state,
				  void *context)
{
	acpi_status status = AE_OK;
	u32 this_index;
	union acpi_operand_object **this_target_ptr;
	union acpi_operand_object *target_object;

	ACPI_FUNCTION_ENTRY();

	this_index = state->pkg.index;
	this_target_ptr = (union acpi_operand_object **)
	    &state->pkg.dest_object->package.elements[this_index];

	switch (object_type) {
	case ACPI_COPY_TYPE_SIMPLE:

		/* A null source object indicates a (legal) null package element */

		if (source_object) {
			/*
			 * This is a simple object, just copy it
			 */
			target_object =
			    acpi_ut_create_internal_object(source_object->
							   common.type);
			if (!target_object) {
				return (AE_NO_MEMORY);
			}

			status =
			    acpi_ut_copy_simple_object(source_object,
						       target_object);
			if (ACPI_FAILURE(status)) {
				goto error_exit;
			}

			*this_target_ptr = target_object;
		} else {
			/* Pass through a null element */

			*this_target_ptr = NULL;
		}
		break;

	case ACPI_COPY_TYPE_PACKAGE:

		/*
		 * This object is a package - go down another nesting level
		 * Create and build the package object
		 */
		target_object =
		    acpi_ut_create_package_object(source_object->package.count);
		if (!target_object) {
			return (AE_NO_MEMORY);
		}

		target_object->common.flags = source_object->common.flags;

		/* Pass the new package object back to the package walk routine */

		state->pkg.this_target_obj = target_object;

		/* Store the object pointer in the parent package object */

		*this_target_ptr = target_object;
		break;

	default:

		return (AE_BAD_PARAMETER);
	}

	return (status);

error_exit:
	/* On copy failure, release the half-built target object */

	acpi_ut_remove_reference(target_object);
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_ipackage_to_ipackage
 *
 * PARAMETERS:  source_obj      - Pointer to the source package object
 *              dest_obj        - Where the internal object is returned
 *              walk_state      - Current Walk state descriptor
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to copy an internal package object
 *              into another internal package object.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
				  union acpi_operand_object *dest_obj,
				  struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);

	dest_obj->common.type = source_obj->common.type;
	dest_obj->common.flags = source_obj->common.flags;
	dest_obj->package.count = source_obj->package.count;

	/*
	 * Create the object array and walk the source package tree.
	 * Count + 1: the element array is NULL-terminated.
	 */
	dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
							   source_obj->package.
							   count +
							   1) * sizeof(void *));
	if (!dest_obj->package.elements) {
		ACPI_ERROR((AE_INFO, "Package allocation failure"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Copy the package element-by-element by walking the package "tree".
	 * This handles nested packages of arbitrary depth.
	 */
	status = acpi_ut_walk_package_tree(source_obj, dest_obj,
					   acpi_ut_copy_ielement_to_ielement,
					   walk_state);
	if (ACPI_FAILURE(status)) {

		/* On failure, delete the destination package object */

		acpi_ut_remove_reference(dest_obj);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_iobject_to_iobject
 *
 * PARAMETERS:  source_desc         - The internal object to be copied
 *              dest_desc           - Where the copied object is returned
 *              walk_state          - Current walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy an internal object to a new internal object. Top-level
 *              entry point; dispatches to the package or simple copy path.
 *
 ******************************************************************************/

acpi_status
acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
				union acpi_operand_object **dest_desc,
				struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);

	/* Create the top level object */

	*dest_desc = acpi_ut_create_internal_object(source_desc->common.type);
	if (!*dest_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Copy the object and possible subobjects */

	if (source_desc->common.type == ACPI_TYPE_PACKAGE) {
		status =
		    acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc,
						      walk_state);
	} else {
		status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
	}

	return_ACPI_STATUS(status);
}
gpl-2.0
v-superuser/android_kernel_htc_msm8974
drivers/acpi/acpica/tbfind.c
4919
4828
/****************************************************************************** * * Module Name: tbfind - find table * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "actables.h" #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbfind") /******************************************************************************* * * FUNCTION: acpi_tb_find_table * * PARAMETERS: Signature - String with ACPI table signature * oem_id - String with the table OEM ID * oem_table_id - String with the OEM Table ID * table_index - Where the table index is returned * * RETURN: Status and table index * * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the * Signature, OEM ID and OEM Table ID. Returns an index that can * be used to get the table header or entire table. 
 *
 ******************************************************************************/

acpi_status
acpi_tb_find_table(char *signature,
		   char *oem_id, char *oem_table_id, u32 *table_index)
{
	u32 i;
	acpi_status status;
	struct acpi_table_header header;

	ACPI_FUNCTION_TRACE(tb_find_table);

	/* Normalize the input strings */

	ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
	ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
	ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
	ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);

	/* Search for the table */

	for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
		if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
				header.signature, ACPI_NAME_SIZE)) {

			/* Not the requested table */

			continue;
		}

		/* Table with matching signature has been found */

		if (!acpi_gbl_root_table_list.tables[i].pointer) {
			/* Table is not currently mapped, map it */

			status =
			    acpi_tb_verify_table(&acpi_gbl_root_table_list.
						 tables[i]);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			if (!acpi_gbl_root_table_list.tables[i].pointer) {
				continue;
			}
		}

		/*
		 * Check for table match on all IDs. An empty oem_id or
		 * oem_table_id acts as a wildcard and matches any table.
		 */
		if (!ACPI_MEMCMP
		    (acpi_gbl_root_table_list.tables[i].pointer->signature,
		     header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
							   ||
							   !ACPI_MEMCMP
							   (acpi_gbl_root_table_list.
							    tables[i].pointer->
							    oem_id,
							    header.oem_id,
							    ACPI_OEM_ID_SIZE))
		    && (!oem_table_id[0]
			|| !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
					pointer->oem_table_id,
					header.oem_table_id,
					ACPI_OEM_TABLE_ID_SIZE))) {
			*table_index = i;

			ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
					  "Found table [%4.4s]\n",
					  header.signature));
			return_ACPI_STATUS(AE_OK);
		}
	}

	return_ACPI_STATUS(AE_NOT_FOUND);
}
gpl-2.0
IOKP/kernel_asus_flo
drivers/acpi/acpica/utlock.c
4919
5770
/****************************************************************************** * * Module Name: utlock - Reader/Writer lock interfaces * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
 IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utlock")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_rw_lock
 *              acpi_ut_delete_rw_lock
 *
 * PARAMETERS:  Lock                - Pointer to a valid RW lock
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Reader/writer lock creation and deletion interfaces.
 *
 ******************************************************************************/

acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock)
{
	acpi_status status;

	lock->num_readers = 0;

	/*
	 * NOTE(review): if creating the writer mutex fails, the reader
	 * mutex created above is not released here — presumably the caller
	 * is expected to call acpi_ut_delete_rw_lock; verify against callers.
	 */
	status = acpi_os_create_mutex(&lock->reader_mutex);
	if (ACPI_FAILURE(status)) {
		return status;
	}

	status = acpi_os_create_mutex(&lock->writer_mutex);
	return status;
}

void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
{

	acpi_os_delete_mutex(lock->reader_mutex);
	acpi_os_delete_mutex(lock->writer_mutex);

	lock->num_readers = 0;
	lock->reader_mutex = NULL;
	lock->writer_mutex = NULL;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_acquire_read_lock
 *              acpi_ut_release_read_lock
 *
 * PARAMETERS:  Lock                - Pointer to a valid RW lock
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Reader interfaces for reader/writer locks. On acquisition,
 *              only the first reader acquires the write mutex. On release,
 *              only the last reader releases the write mutex.
Although this * algorithm can in theory starve writers, this should not be a * problem with ACPICA since the subsystem is infrequently used * in comparison to (for example) an I/O system. * ******************************************************************************/ acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock) { acpi_status status; status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER); if (ACPI_FAILURE(status)) { return status; } /* Acquire the write lock only for the first reader */ lock->num_readers++; if (lock->num_readers == 1) { status = acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER); } acpi_os_release_mutex(lock->reader_mutex); return status; } acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock) { acpi_status status; status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER); if (ACPI_FAILURE(status)) { return status; } /* Release the write lock only for the very last reader */ lock->num_readers--; if (lock->num_readers == 0) { acpi_os_release_mutex(lock->writer_mutex); } acpi_os_release_mutex(lock->reader_mutex); return status; } /******************************************************************************* * * FUNCTION: acpi_ut_acquire_write_lock * acpi_ut_release_write_lock * * PARAMETERS: Lock - Pointer to a valid RW lock * * RETURN: Status * * DESCRIPTION: Writer interfaces for reader/writer locks. Simply acquire or * release the writer mutex associated with the lock. Acquisition * of the lock is fully exclusive and will block all readers and * writers until it is released. * ******************************************************************************/ acpi_status acpi_ut_acquire_write_lock(struct acpi_rw_lock *lock) { acpi_status status; status = acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER); return status; } void acpi_ut_release_write_lock(struct acpi_rw_lock *lock) { acpi_os_release_mutex(lock->writer_mutex); }
gpl-2.0
Split-Screen/android_kernel_lge_gproj
drivers/acpi/acpica/utosi.c
4919
12320
/****************************************************************************** * * Module Name: utosi - Support for the _OSI predefined control method * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utosi") /* * Strings supported by the _OSI predefined control method (which is * implemented internally within this module.) * * March 2009: Removed "Linux" as this host no longer wants to respond true * for this string. Basically, the only safe OS strings are windows-related * and in many or most cases represent the only test path within the * BIOS-provided ASL code. * * The last element of each entry is used to track the newest version of * Windows that the BIOS has requested. 
 */
static struct acpi_interface_info acpi_default_supported_interfaces[] = {
	/* Operating System Vendor Strings */

	{"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000},	/* Windows 2000 */
	{"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP},	/* Windows XP */
	{"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1},	/* Windows XP SP1 */
	{"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003},	/* Windows Server 2003 */
	{"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2},	/* Windows XP SP2 */
	{"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1},	/* Windows Server 2003 SP1 - Added 03/2006 */
	{"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA},	/* Windows Vista - Added 03/2006 */
	{"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008},	/* Windows Server 2008 - Added 09/2009 */
	{"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1},	/* Windows Vista SP1 - Added 09/2009 */
	{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},	/* Windows Vista SP2 - Added 09/2010 */
	{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},	/* Windows 7 and Server 2008 R2 - Added 09/2009 */

	/* Feature Group Strings */

	{"Extended Address Space Descriptor", NULL, 0, 0}

	/*
	 * All "optional" feature group strings (features that are implemented
	 * by the host) should be dynamically added by the host via
	 * acpi_install_interface and should not be manually added here.
	 *
	 * Examples of optional feature group strings:
	 *
	 * "Module Device"
	 * "Processor Device"
	 * "3.0 Thermal Model"
	 * "3.0 _SCP Extensions"
	 * "Processor Aggregator Device"
	 */
};

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_initialize_interfaces
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the global _OSI supported interfaces list
 *
 ******************************************************************************/

acpi_status acpi_ut_initialize_interfaces(void)
{
	u32 i;

	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;

	/*
	 * Link the static list of supported interfaces. Stops one short of
	 * the array length, so the final entry keeps its NULL next pointer
	 * and terminates the list.
	 */
	for (i = 0;
	     i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
	     i++) {
		acpi_default_supported_interfaces[i].next =
		    &acpi_default_supported_interfaces[(acpi_size) i + 1];
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_interface_terminate
 *
 * PARAMETERS:  None
 *
 * RETURN:      None
 *
 * DESCRIPTION: Delete all interfaces in the global list. Sets
 *              acpi_gbl_supported_interfaces to NULL.
 *
 ******************************************************************************/

void acpi_ut_interface_terminate(void)
{
	struct acpi_interface_info *next_interface;

	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	next_interface = acpi_gbl_supported_interfaces;

	while (next_interface) {
		acpi_gbl_supported_interfaces = next_interface->next;

		/*
		 * Only interfaces added at runtime can be freed; entries
		 * from the static default table are not heap-allocated.
		 */
		if (next_interface->flags & ACPI_OSI_DYNAMIC) {
			ACPI_FREE(next_interface->name);
			ACPI_FREE(next_interface);
		}

		next_interface = acpi_gbl_supported_interfaces;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_install_interface
 *
 * PARAMETERS:  interface_name      - The interface to install
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install the interface into the global interface list.
 *              Caller MUST hold acpi_gbl_osi_mutex
 *
 ******************************************************************************/

acpi_status acpi_ut_install_interface(acpi_string interface_name)
{
	struct acpi_interface_info *interface_info;

	/* Allocate info block and space for the name string */

	interface_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info));
	if (!interface_info) {
		return (AE_NO_MEMORY);
	}

	interface_info->name =
	    ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
	if (!interface_info->name) {
		ACPI_FREE(interface_info);
		return (AE_NO_MEMORY);
	}

	/* Initialize new info and insert at the head of the global list */

	ACPI_STRCPY(interface_info->name, interface_name);
	interface_info->flags = ACPI_OSI_DYNAMIC;
	interface_info->next = acpi_gbl_supported_interfaces;

	acpi_gbl_supported_interfaces = interface_info;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_interface
 *
 * PARAMETERS:  interface_name      - The interface to remove
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove the interface from the global interface
 *              list.
 *              Caller MUST hold acpi_gbl_osi_mutex
 *
 ******************************************************************************/

acpi_status acpi_ut_remove_interface(acpi_string interface_name)
{
	struct acpi_interface_info *previous_interface;
	struct acpi_interface_info *next_interface;

	previous_interface = next_interface = acpi_gbl_supported_interfaces;
	while (next_interface) {
		if (!ACPI_STRCMP(interface_name, next_interface->name)) {

			/* Found: name is in either the static list or was added at runtime */

			if (next_interface->flags & ACPI_OSI_DYNAMIC) {

				/* Interface was added dynamically, remove and free it */

				if (previous_interface == next_interface) {
					/* Matched the list head */

					acpi_gbl_supported_interfaces =
					    next_interface->next;
				} else {
					previous_interface->next =
					    next_interface->next;
				}

				ACPI_FREE(next_interface->name);
				ACPI_FREE(next_interface);
			} else {
				/*
				 * Interface is in static list. If marked invalid, then it
				 * does not actually exist. Else, mark it invalid.
				 * (Static entries are never unlinked, only flagged.)
				 */
				if (next_interface->flags & ACPI_OSI_INVALID) {
					return (AE_NOT_EXIST);
				}

				next_interface->flags |= ACPI_OSI_INVALID;
			}

			return (AE_OK);
		}

		previous_interface = next_interface;
		next_interface = next_interface->next;
	}

	/* Interface was not found */

	return (AE_NOT_EXIST);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_interface
 *
 * PARAMETERS:  interface_name      - The interface to find
 *
 * RETURN:      struct acpi_interface_info if found. NULL if not found.
 *
 * DESCRIPTION: Search for the specified interface name in the global list.
 *              Caller MUST hold acpi_gbl_osi_mutex
 *
 ******************************************************************************/

struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
{
	struct acpi_interface_info *next_interface;

	/* Simple linear scan of the linked list */

	next_interface = acpi_gbl_supported_interfaces;
	while (next_interface) {
		if (!ACPI_STRCMP(interface_name, next_interface->name)) {
			return (next_interface);
		}

		next_interface = next_interface->next;
	}

	return (NULL);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_osi_implementation
 *
 * PARAMETERS:  walk_state          - Current walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Implementation of the _OSI predefined control method. When
 *              an invocation of _OSI is encountered in the system AML,
 *              control is transferred to this function. Returns
 *              ACPI_UINT32_MAX if the interface is supported, 0 otherwise
 *              (possibly overridden by an optional host handler).
 *
 ******************************************************************************/

acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
{
	union acpi_operand_object *string_desc;
	union acpi_operand_object *return_desc;
	struct acpi_interface_info *interface_info;
	acpi_interface_handler interface_handler;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ut_osi_implementation);

	/* Validate the string input argument (from the AML caller) */

	string_desc = walk_state->arguments[0].object;
	if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
		return_ACPI_STATUS(AE_TYPE);
	}

	/* Create a return object */

	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
	if (!return_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Default return value is 0, NOT SUPPORTED */

	return_value = 0;
	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);

	/* Lookup the interface in the global _OSI list */

	interface_info = acpi_ut_get_interface(string_desc->string.pointer);
	if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) {
		/*
		 * The interface is supported.
		 * Update the osi_data if necessary. We keep track of the latest
		 * version of Windows that has been requested by the BIOS.
		 */
		if (interface_info->value > acpi_gbl_osi_data) {
			acpi_gbl_osi_data = interface_info->value;
		}

		return_value = ACPI_UINT32_MAX;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);

	/*
	 * Invoke an optional _OSI interface handler. The host OS may wish
	 * to do some interface-specific handling. For example, warn about
	 * certain interfaces or override the true/false support value.
	 * (Called after the mutex is released; the handler may override
	 * return_value in either direction.)
	 */
	interface_handler = acpi_gbl_interface_handler;
	if (interface_handler) {
		return_value =
		    interface_handler(string_desc->string.pointer,
				      return_value);
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
			      "ACPI: BIOS _OSI(\"%s\") is %ssupported\n",
			      string_desc->string.pointer,
			      return_value == 0 ? "not " : ""));

	/* Complete the return object */

	return_desc->integer.value = return_value;
	walk_state->return_desc = return_desc;
	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
kbc-developers/android_kernel_htc_m7wlj
net/irda/irqueue.c
5431
23284
/********************************************************************* * * Filename: irqueue.c * Version: 0.3 * Description: General queue implementation * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Tue Jun 9 13:29:31 1998 * Modified at: Sun Dec 12 13:48:22 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * Modified at: Thu Jan 4 14:29:10 CET 2001 * Modified by: Marc Zyngier <mzyngier@freesurf.fr> * * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no> * Copyright (C) 1998, Dag Brattli, * All Rights Reserved. * * This code is taken from the Vortex Operating System written by Aage * Kvalnes. Aage has agreed that this code can use the GPL licence, * although he does not use that licence in his own code. * * This copyright does however _not_ include the ELF hash() function * which I currently don't know which licence or copyright it * has. Please inform me if you know. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ /* * NOTE : * There are various problems with this package : * o the hash function for ints is pathetic (but could be changed) * o locking is sometime suspicious (especially during enumeration) * o most users have only a few elements (== overhead) * o most users never use search, so don't benefit from hashing * Problem already fixed : * o not 64 bit compliant (most users do hashv = (int) self) * o hashbin_remove() is broken => use hashbin_remove_this() * I think most users would be better served by a simple linked list * (like include/linux/list.h) with a global spinlock per list. 
 *     Jean II
 */

/*
 * Notes on the concurrent access to hashbin and other SMP issues
 * -------------------------------------------------------------
 *	Hashbins are very often in the IrDA stack a global repository of
 * information, and therefore used in a very asynchronous manner following
 * various events (driver calls, timers, user calls...).
 *	Therefore, very often it is highly important to consider the
 * management of concurrent access to the hashbin and how to guarantee the
 * consistency of the operations on it.
 *
 *	First, we need to define the objective of locking :
 *		1) Protect user data (content pointed by the hashbin)
 *		2) Protect hashbin structure itself (linked list in each bin)
 *
 *			OLD LOCKING
 *			-----------
 *
 *	The previous locking strategy, either HB_LOCAL or HB_GLOBAL were
 * both inadequate in *both* aspect.
 *		o HB_GLOBAL was using a spinlock for each bin (local locking).
 *		o HB_LOCAL was disabling irq on *all* CPUs, so use a single
 *		  global semaphore.
 *	The problems were :
 *		A) Global irq disabling is no longer supported by the kernel
 *		B) No protection for the hashbin struct global data
 *			o hashbin_delete()
 *			o hb_current
 *		C) No protection for user data in some cases
 *
 *	A) HB_LOCAL use global irq disabling, so doesn't work on kernel
 * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
 * performance is not satisfactory on SMP setups. Most hashbins were
 * HB_LOCAL, so (A) definitely needs fixing.
 *	B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 * locks only the individual bins, it will never be able to lock the
 * global data, so can't do (B).
 *	C) Some functions return a pointer to data that is still in the
 * hashbin :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 *	As the data is still in the hashbin, it may be changed or free'd
 * while the caller is examining the data. In those cases, locking can't
 * be done within the hashbin, but must include use of the data within
 * the caller.
 *	The caller can easily do this with HB_LOCAL (just disable irqs).
 * However, this is impossible with HB_GLOBAL because the caller has no
 * way to know the proper bin, so doesn't know which spinlock to use.
 *
 *	Quick summary : can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 *			NEW LOCKING
 *			-----------
 *
 *	To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *		1) New HB_LOCK scheme
 *		2) hashbin->hb_spinlock
 *		3) New hashbin usage policy
 *
 *	HB_LOCK :
 *	-------
 *	HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 *	HB_LOCK can only protect some of the hashbin calls, so it only locks
 * calls that can be made 100% safe and leaves other calls unprotected.
 *	HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin
 * content is always small contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 *	hashbin->hb_spinlock :
 *	--------------------
 *	The spinlock that HB_LOCK uses is available for the caller, so that
 * the caller can protect unprotected calls (see below).
 *	If the caller wants to do entirely its own locking (HB_NOLOCK), he
 * can do so and may safely use this spinlock.
 *	Locking is done like this :
 *		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *	Releasing the lock :
 *		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 *	Safe & Protected calls :
 *	----------------------
 *	The following calls are safe or protected via HB_LOCK :
 *		o hashbin_new()		-> safe
 *		o hashbin_delete()
 *		o hashbin_insert()
 *		o hashbin_remove_first()
 *		o hashbin_remove()
 *		o hashbin_remove_this()
 *		o HASHBIN_GET_SIZE()	-> atomic
 *
 *	The following calls only protect the hashbin itself :
 *		o hashbin_lock_find()
 *		o hashbin_find_next()
 *
 *	Unprotected calls :
 *	-----------------
 *	The following calls need to be protected by the caller :
 *		o hashbin_find()
 *		o hashbin_get_first()
 *		o hashbin_get_next()
 *
 *	Locking Policy :
 *	--------------
 *	If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK
 *	If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 *	In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You also must
 * use the pointer returned by the unprotected call within the locked
 * region.
 *
 *	Extra care for enumeration :
 *	--------------------------
 *	hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 *	As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 *	Summary : do the full enumeration while locked.
 *
 *	Alternatively, you may use hashbin_find_next(). But, this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
 *	Other issues :
 *	------------
 *	I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But, I don't have
 * the balls to try it out.
* Don't believe that because hashbin are now (somewhat) SMP safe * that the rest of the code is. Higher layers tend to be safest, * but LAP and LMP would need some serious dedicated love. * * Jean II */ #include <linux/module.h> #include <linux/slab.h> #include <net/irda/irda.h> #include <net/irda/irqueue.h> /************************ QUEUE SUBROUTINES ************************/ /* * Hashbin */ #define GET_HASHBIN(x) ( x & HASHBIN_MASK ) /* * Function hash (name) * * This function hash the input string 'name' using the ELF hash * function for strings. */ static __u32 hash( const char* name) { __u32 h = 0; __u32 g; while(*name) { h = (h<<4) + *name++; if ((g = (h & 0xf0000000))) h ^=g>>24; h &=~g; } return h; } /* * Function enqueue_first (queue, proc) * * Insert item first in queue. * */ static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) { IRDA_DEBUG( 4, "%s()\n", __func__); /* * Check if queue is empty. */ if ( *queue == NULL ) { /* * Queue is empty. Insert one element into the queue. */ element->q_next = element->q_prev = *queue = element; } else { /* * Queue is not empty. Insert element into front of queue. */ element->q_next = (*queue); (*queue)->q_prev->q_next = element; element->q_prev = (*queue)->q_prev; (*queue)->q_prev = element; (*queue) = element; } } /* * Function dequeue (queue) * * Remove first entry in queue * */ static irda_queue_t *dequeue_first(irda_queue_t **queue) { irda_queue_t *ret; IRDA_DEBUG( 4, "dequeue_first()\n"); /* * Set return value */ ret = *queue; if ( *queue == NULL ) { /* * Queue was empty. */ } else if ( (*queue)->q_next == *queue ) { /* * Queue only contained a single element. It will now be * empty. */ *queue = NULL; } else { /* * Queue contained several element. Remove the first one. */ (*queue)->q_prev->q_next = (*queue)->q_next; (*queue)->q_next->q_prev = (*queue)->q_prev; *queue = (*queue)->q_next; } /* * Return the removed entry (or NULL of queue was empty). 
*/ return ret; } /* * Function dequeue_general (queue, element) * * */ static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element) { irda_queue_t *ret; IRDA_DEBUG( 4, "dequeue_general()\n"); /* * Set return value */ ret = *queue; if ( *queue == NULL ) { /* * Queue was empty. */ } else if ( (*queue)->q_next == *queue ) { /* * Queue only contained a single element. It will now be * empty. */ *queue = NULL; } else { /* * Remove specific element. */ element->q_prev->q_next = element->q_next; element->q_next->q_prev = element->q_prev; if ( (*queue) == element) (*queue) = element->q_next; } /* * Return the removed entry (or NULL of queue was empty). */ return ret; } /************************ HASHBIN MANAGEMENT ************************/ /* * Function hashbin_create ( type, name ) * * Create hashbin! * */ hashbin_t *hashbin_new(int type) { hashbin_t* hashbin; /* * Allocate new hashbin */ hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC); if (!hashbin) return NULL; /* * Initialize structure */ hashbin->hb_type = type; hashbin->magic = HB_MAGIC; //hashbin->hb_current = NULL; /* Make sure all spinlock's are unlocked */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_init(&hashbin->hb_spinlock); } return hashbin; } EXPORT_SYMBOL(hashbin_new); /* * Function hashbin_delete (hashbin, free_func) * * Destroy hashbin, the free_func can be a user supplied special routine * for deallocating this structure if it's complex. If not the user can * just supply kfree, which should take care of the job. 
*/ #ifdef CONFIG_LOCKDEP static int hashbin_lock_depth = 0; #endif int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) { irda_queue_t* queue; unsigned long flags = 0; int i; IRDA_ASSERT(hashbin != NULL, return -1;); IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, hashbin_lock_depth++); } /* * Free the entries in the hashbin, TODO: use hashbin_clear when * it has been shown to work */ for (i = 0; i < HASHBIN_SIZE; i ++ ) { queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); while (queue ) { if (free_func) (*free_func)(queue); queue = dequeue_first( (irda_queue_t**) &hashbin->hb_queue[i]); } } /* Cleanup local data */ hashbin->hb_current = NULL; hashbin->magic = ~HB_MAGIC; /* Release lock */ if ( hashbin->hb_type & HB_LOCK) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); #ifdef CONFIG_LOCKDEP hashbin_lock_depth--; #endif } /* * Free the hashbin structure */ kfree(hashbin); return 0; } EXPORT_SYMBOL(hashbin_delete); /********************* HASHBIN LIST OPERATIONS *********************/ /* * Function hashbin_insert (hashbin, entry, name) * * Insert an entry into the hashbin * */ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, const char* name) { unsigned long flags = 0; int bin; IRDA_DEBUG( 4, "%s()\n", __func__); IRDA_ASSERT( hashbin != NULL, return;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;); /* * Locate hashbin */ if ( name ) hashv = hash( name ); bin = GET_HASHBIN( hashv ); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* * Store name and key */ entry->q_hash = hashv; if ( name ) strlcpy( entry->q_name, name, sizeof(entry->q_name)); /* * Insert new entry first */ enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ], entry); hashbin->hb_size++; /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { 
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ } EXPORT_SYMBOL(hashbin_insert); /* * Function hashbin_remove_first (hashbin) * * Remove first entry of the hashbin * * Note : this function no longer use hashbin_remove(), but does things * similar to hashbin_remove_this(), so can be considered safe. * Jean II */ void *hashbin_remove_first( hashbin_t *hashbin) { unsigned long flags = 0; irda_queue_t *entry = NULL; /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ entry = hashbin_get_first( hashbin); if ( entry != NULL) { int bin; long hashv; /* * Locate hashbin */ hashv = entry->q_hash; bin = GET_HASHBIN( hashv ); /* * Dequeue the entry... */ dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], (irda_queue_t*) entry ); hashbin->hb_size--; entry->q_next = NULL; entry->q_prev = NULL; /* * Check if this item is the currently selected item, and in * that case we must reset hb_current */ if ( entry == hashbin->hb_current) hashbin->hb_current = NULL; } /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ return entry; } /* * Function hashbin_remove (hashbin, hashv, name) * * Remove entry with the given name * * The use of this function is highly discouraged, because the whole * concept behind hashbin_remove() is broken. In many cases, it's not * possible to guarantee the unicity of the index (either hashv or name), * leading to removing the WRONG entry. * The only simple safe use is : * hashbin_remove(hasbin, (int) self, NULL); * In other case, you must think hard to guarantee unicity of the index. 
* Jean II */ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name) { int bin, found = FALSE; unsigned long flags = 0; irda_queue_t* entry; IRDA_DEBUG( 4, "%s()\n", __func__); IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); /* * Locate hashbin */ if ( name ) hashv = hash( name ); bin = GET_HASHBIN( hashv ); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* * Search for entry */ entry = hashbin->hb_queue[ bin ]; if ( entry ) { do { /* * Check for key */ if ( entry->q_hash == hashv ) { /* * Name compare too? */ if ( name ) { if ( strcmp( entry->q_name, name) == 0) { found = TRUE; break; } } else { found = TRUE; break; } } entry = entry->q_next; } while ( entry != hashbin->hb_queue[ bin ] ); } /* * If entry was found, dequeue it */ if ( found ) { dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], (irda_queue_t*) entry ); hashbin->hb_size--; /* * Check if this item is the currently selected item, and in * that case we must reset hb_current */ if ( entry == hashbin->hb_current) hashbin->hb_current = NULL; } /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* Return */ if ( found ) return entry; else return NULL; } EXPORT_SYMBOL(hashbin_remove); /* * Function hashbin_remove_this (hashbin, entry) * * Remove entry with the given name * * In some cases, the user of hashbin can't guarantee the unicity * of either the hashv or name. * In those cases, using the above function is guaranteed to cause troubles, * so we use this one instead... 
* And by the way, it's also faster, because we skip the search phase ;-) */ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry) { unsigned long flags = 0; int bin; long hashv; IRDA_DEBUG( 4, "%s()\n", __func__); IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); IRDA_ASSERT( entry != NULL, return NULL;); /* Synchronize */ if ( hashbin->hb_type & HB_LOCK ) { spin_lock_irqsave(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ /* Check if valid and not already removed... */ if((entry->q_next == NULL) || (entry->q_prev == NULL)) { entry = NULL; goto out; } /* * Locate hashbin */ hashv = entry->q_hash; bin = GET_HASHBIN( hashv ); /* * Dequeue the entry... */ dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ], (irda_queue_t*) entry ); hashbin->hb_size--; entry->q_next = NULL; entry->q_prev = NULL; /* * Check if this item is the currently selected item, and in * that case we must reset hb_current */ if ( entry == hashbin->hb_current) hashbin->hb_current = NULL; out: /* Release lock */ if ( hashbin->hb_type & HB_LOCK ) { spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); } /* Default is no-lock */ return entry; } EXPORT_SYMBOL(hashbin_remove_this); /*********************** HASHBIN ENUMERATION ***********************/ /* * Function hashbin_common_find (hashbin, hashv, name) * * Find item with the given hashv or name * */ void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name ) { int bin; irda_queue_t* entry; IRDA_DEBUG( 4, "hashbin_find()\n"); IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); /* * Locate hashbin */ if ( name ) hashv = hash( name ); bin = GET_HASHBIN( hashv ); /* * Search for entry */ entry = hashbin->hb_queue[ bin]; if ( entry ) { do { /* * Check for key */ if ( entry->q_hash == hashv ) { /* * Name compare too? 
*/ if ( name ) { if ( strcmp( entry->q_name, name ) == 0 ) { return entry; } } else { return entry; } } entry = entry->q_next; } while ( entry != hashbin->hb_queue[ bin ] ); } return NULL; } EXPORT_SYMBOL(hashbin_find); /* * Function hashbin_lock_find (hashbin, hashv, name) * * Find item with the given hashv or name * * Same, but with spinlock protection... * I call it safe, but it's only safe with respect to the hashbin, not its * content. - Jean II */ void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name ) { unsigned long flags = 0; irda_queue_t* entry; /* Synchronize */ spin_lock_irqsave(&hashbin->hb_spinlock, flags); /* * Search for entry */ entry = hashbin_find(hashbin, hashv, name); /* Release lock */ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); return entry; } EXPORT_SYMBOL(hashbin_lock_find); /* * Function hashbin_find (hashbin, hashv, name, pnext) * * Find an item with the given hashv or name, and its successor * * This function allow to do concurrent enumerations without the * need to lock over the whole session, because the caller keep the * context of the search. On the other hand, it might fail and return * NULL if the entry is removed. - Jean II */ void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name, void ** pnext) { unsigned long flags = 0; irda_queue_t* entry; /* Synchronize */ spin_lock_irqsave(&hashbin->hb_spinlock, flags); /* * Search for current entry * This allow to check if the current item is still in the * hashbin or has been removed. */ entry = hashbin_find(hashbin, hashv, name); /* * Trick hashbin_get_next() to return what we want */ if(entry) { hashbin->hb_current = entry; *pnext = hashbin_get_next( hashbin ); } else *pnext = NULL; /* Release lock */ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); return entry; } /* * Function hashbin_get_first (hashbin) * * Get a pointer to first element in hashbin, this function must be * called before any calls to hashbin_get_next()! 
* */ irda_queue_t *hashbin_get_first( hashbin_t* hashbin) { irda_queue_t *entry; int i; IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); if ( hashbin == NULL) return NULL; for ( i = 0; i < HASHBIN_SIZE; i ++ ) { entry = hashbin->hb_queue[ i]; if ( entry) { hashbin->hb_current = entry; return entry; } } /* * Did not find any item in hashbin */ return NULL; } EXPORT_SYMBOL(hashbin_get_first); /* * Function hashbin_get_next (hashbin) * * Get next item in hashbin. A series of hashbin_get_next() calls must * be started by a call to hashbin_get_first(). The function returns * NULL when all items have been traversed * * The context of the search is stored within the hashbin, so you must * protect yourself from concurrent enumerations. - Jean II */ irda_queue_t *hashbin_get_next( hashbin_t *hashbin) { irda_queue_t* entry; int bin; int i; IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); if ( hashbin->hb_current == NULL) { IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;); return NULL; } entry = hashbin->hb_current->q_next; bin = GET_HASHBIN( entry->q_hash); /* * Make sure that we are not back at the beginning of the queue * again */ if ( entry != hashbin->hb_queue[ bin ]) { hashbin->hb_current = entry; return entry; } /* * Check that this is not the last queue in hashbin */ if ( bin >= HASHBIN_SIZE) return NULL; /* * Move to next queue in hashbin */ bin++; for ( i = bin; i < HASHBIN_SIZE; i++ ) { entry = hashbin->hb_queue[ i]; if ( entry) { hashbin->hb_current = entry; return entry; } } return NULL; } EXPORT_SYMBOL(hashbin_get_next);
gpl-2.0
SVMP/kernel
lib/syscall.c
7735
2475
#include <linux/ptrace.h> #include <linux/sched.h> #include <linux/export.h> #include <asm/syscall.h> static int collect_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { struct pt_regs *regs = task_pt_regs(target); if (unlikely(!regs)) return -EAGAIN; *sp = user_stack_pointer(regs); *pc = instruction_pointer(regs); *callno = syscall_get_nr(target, regs); if (*callno != -1L && maxargs > 0) syscall_get_arguments(target, regs, 0, maxargs, args); return 0; } /** * task_current_syscall - Discover what a blocked task is doing. * @target: thread to examine * @callno: filled with system call number or -1 * @args: filled with @maxargs system call arguments * @maxargs: number of elements in @args to fill * @sp: filled with user stack pointer * @pc: filled with user PC * * If @target is blocked in a system call, returns zero with *@callno * set to the the call's number and @args filled in with its arguments. * Registers not used for system call arguments may not be available and * it is not kosher to use &struct user_regset calls while the system * call is still in progress. Note we may get this result if @target * has finished its system call but not yet returned to user mode, such * as when it's stopped for signal handling or syscall exit tracing. * * If @target is blocked in the kernel during a fault or exception, * returns zero with *@callno set to -1 and does not fill in @args. * If so, it's now safe to examine @target using &struct user_regset * get() calls as long as we're sure @target won't return to user mode. * * Returns -%EAGAIN if @target does not remain blocked. * * Returns -%EINVAL if @maxargs is too large (maximum is six). 
*/ int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { long state; unsigned long ncsw; if (unlikely(maxargs > 6)) return -EINVAL; if (target == current) return collect_syscall(target, callno, args, maxargs, sp, pc); state = target->state; if (unlikely(!state)) return -EAGAIN; ncsw = wait_task_inactive(target, state); if (unlikely(!ncsw) || unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || unlikely(wait_task_inactive(target, state) != ncsw)) return -EAGAIN; return 0; } EXPORT_SYMBOL_GPL(task_current_syscall);
gpl-2.0
phalf/android_kernel_samsung_mint-vlx-all
drivers/staging/tidspbridge/core/ue_deh.c
7991
6802
/* * ue_deh.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Implements upper edge DSP exception handling (DEH) functions. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * Copyright (C) 2010 Felipe Contreras * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <plat/dmtimer.h> #include <dspbridge/dbdefs.h> #include <dspbridge/dspdeh.h> #include <dspbridge/dev.h> #include "_tiomap.h" #include "_deh.h" #include <dspbridge/io_sm.h> #include <dspbridge/drv.h> #include <dspbridge/wdt.h> static u32 fault_addr; static void mmu_fault_dpc(unsigned long data) { struct deh_mgr *deh = (void *)data; if (!deh) return; bridge_deh_notify(deh, DSP_MMUFAULT, 0); } static irqreturn_t mmu_fault_isr(int irq, void *data) { struct deh_mgr *deh = data; struct cfg_hostres *resources; u32 event; if (!deh) return IRQ_HANDLED; resources = deh->bridge_context->resources; if (!resources) { dev_dbg(bridge, "%s: Failed to get Host Resources\n", __func__); return IRQ_HANDLED; } hw_mmu_event_status(resources->dmmu_base, &event); if (event == HW_MMU_TRANSLATION_FAULT) { hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr); dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, event, fault_addr); /* * Schedule a DPC directly. In the future, it may be * necessary to check if DSP MMU fault is intended for * Bridge. 
*/ tasklet_schedule(&deh->dpc_tasklet); /* Disable the MMU events, else once we clear it will * start to raise INTs again */ hw_mmu_event_disable(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); } else { hw_mmu_event_disable(resources->dmmu_base, HW_MMU_ALL_INTERRUPTS); } return IRQ_HANDLED; } int bridge_deh_create(struct deh_mgr **ret_deh, struct dev_object *hdev_obj) { int status; struct deh_mgr *deh; struct bridge_dev_context *hbridge_context = NULL; /* Message manager will be created when a file is loaded, since * size of message buffer in shared memory is configurable in * the base image. */ /* Get Bridge context info. */ dev_get_bridge_context(hdev_obj, &hbridge_context); /* Allocate IO manager object: */ deh = kzalloc(sizeof(*deh), GFP_KERNEL); if (!deh) { status = -ENOMEM; goto err; } /* Create an NTFY object to manage notifications */ deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); if (!deh->ntfy_obj) { status = -ENOMEM; goto err; } ntfy_init(deh->ntfy_obj); /* Create a MMUfault DPC */ tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); /* Fill in context structure */ deh->bridge_context = hbridge_context; /* Install ISR function for DSP MMU fault */ status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, "DspBridge\tiommu fault", deh); if (status < 0) goto err; *ret_deh = deh; return 0; err: bridge_deh_destroy(deh); *ret_deh = NULL; return status; } int bridge_deh_destroy(struct deh_mgr *deh) { if (!deh) return -EFAULT; /* If notification object exists, delete it */ if (deh->ntfy_obj) { ntfy_delete(deh->ntfy_obj); kfree(deh->ntfy_obj); } /* Disable DSP MMU fault */ free_irq(INT_DSP_MMU_IRQ, deh); /* Free DPC object */ tasklet_kill(&deh->dpc_tasklet); /* Deallocate the DEH manager object */ kfree(deh); return 0; } int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, u32 notify_type, struct dsp_notification *hnotification) { if (!deh) return -EFAULT; if (event_mask) return ntfy_register(deh->ntfy_obj, hnotification, 
event_mask, notify_type); else return ntfy_unregister(deh->ntfy_obj, hnotification); } #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) { struct cfg_hostres *resources; struct hw_mmu_map_attrs_t map_attrs = { .endianism = HW_LITTLE_ENDIAN, .element_size = HW_ELEM_SIZE16BIT, .mixed_size = HW_MMU_CPUES, }; void *dummy_va_addr; resources = dev_context->resources; dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC); /* * Before acking the MMU fault, let's make sure MMU can only * access entry #0. Then add a new entry so that the DSP OS * can continue in order to dump the stack. */ hw_mmu_twl_disable(resources->dmmu_base); hw_mmu_tlb_flush_all(resources->dmmu_base); hw_mmu_tlb_add(resources->dmmu_base, virt_to_phys(dummy_va_addr), fault_addr, HW_PAGE_SIZE4KB, 1, &map_attrs, HW_SET, HW_SET); dsp_clk_enable(DSP_CLK_GPT8); dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); /* Clear MMU interrupt */ hw_mmu_event_ack(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); dump_dsp_stack(dev_context); dsp_clk_disable(DSP_CLK_GPT8); hw_mmu_disable(resources->dmmu_base); free_page((unsigned long)dummy_va_addr); } #endif static inline const char *event_to_string(int event) { switch (event) { case DSP_SYSERROR: return "DSP_SYSERROR"; break; case DSP_MMUFAULT: return "DSP_MMUFAULT"; break; case DSP_PWRERROR: return "DSP_PWRERROR"; break; case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW"; break; default: return "unkown event"; break; } } void bridge_deh_notify(struct deh_mgr *deh, int event, int info) { struct bridge_dev_context *dev_context; const char *str = event_to_string(event); if (!deh) return; dev_dbg(bridge, "%s: device exception", __func__); dev_context = deh->bridge_context; switch (event) { case DSP_SYSERROR: dev_err(bridge, "%s: %s, info=0x%x", __func__, str, info); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE dump_dl_modules(dev_context); dump_dsp_stack(dev_context); #endif break; case DSP_MMUFAULT: dev_err(bridge, "%s: %s, 
addr=0x%x", __func__, str, fault_addr); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE print_dsp_trace_buffer(dev_context); dump_dl_modules(dev_context); mmu_fault_print_stack(dev_context); #endif break; default: dev_err(bridge, "%s: %s", __func__, str); break; } /* Filter subsequent notifications when an error occurs */ if (dev_context->brd_state != BRD_ERROR) { ntfy_notify(deh->ntfy_obj, event); #ifdef CONFIG_TIDSPBRIDGE_RECOVERY bridge_recover_schedule(); #endif } /* Set the Board state as ERROR */ dev_context->brd_state = BRD_ERROR; /* Disable all the clocks that were enabled by DSP */ dsp_clock_disable_all(dev_context->dsp_per_clks); /* * Avoid the subsequent WDT if it happens once, * also if fatal error occurs. */ dsp_wdt_enable(false); }
gpl-2.0
blackdeviant/nickless
drivers/net/wireless/atmel_pci.c
9015
2535
/*** -*- linux-c -*- ********************************************************** Driver for Atmel at76c502 at76c504 and at76c506 wireless cards. Copyright 2004 Simon Kelley. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Atmel wireless lan drivers; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ******************************************************************************/ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include "atmel.h" MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); static DEFINE_PCI_DEVICE_TABLE(card_ids) = { { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, { 0, } }; MODULE_DEVICE_TABLE(pci, card_ids); static int atmel_pci_probe(struct pci_dev *, const struct pci_device_id *); static void atmel_pci_remove(struct pci_dev *); static struct pci_driver atmel_driver = { .name = "atmel", .id_table = card_ids, .probe = atmel_pci_probe, .remove = __devexit_p(atmel_pci_remove), }; static int __devinit atmel_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pent) { struct net_device *dev; if (pci_enable_device(pdev)) return -ENODEV; pci_set_master(pdev); dev = init_atmel_card(pdev->irq, pdev->resource[1].start, ATMEL_FW_TYPE_506, &pdev->dev, NULL, NULL); if (!dev) return 
-ENODEV; pci_set_drvdata(pdev, dev); return 0; } static void __devexit atmel_pci_remove(struct pci_dev *pdev) { stop_atmel_card(pci_get_drvdata(pdev)); } static int __init atmel_init_module(void) { return pci_register_driver(&atmel_driver); } static void __exit atmel_cleanup_module(void) { pci_unregister_driver(&atmel_driver); } module_init(atmel_init_module); module_exit(atmel_cleanup_module);
gpl-2.0
krosk/android-omap-tuna-sideload
drivers/net/wireless/atmel_pci.c
9015
2535
/*** -*- linux-c -*- ********************************************************** Driver for Atmel at76c502 at76c504 and at76c506 wireless cards. Copyright 2004 Simon Kelley. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Atmel wireless lan drivers; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ******************************************************************************/ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include "atmel.h" MODULE_AUTHOR("Simon Kelley"); MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards."); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); static DEFINE_PCI_DEVICE_TABLE(card_ids) = { { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, { 0, } }; MODULE_DEVICE_TABLE(pci, card_ids); static int atmel_pci_probe(struct pci_dev *, const struct pci_device_id *); static void atmel_pci_remove(struct pci_dev *); static struct pci_driver atmel_driver = { .name = "atmel", .id_table = card_ids, .probe = atmel_pci_probe, .remove = __devexit_p(atmel_pci_remove), }; static int __devinit atmel_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pent) { struct net_device *dev; if (pci_enable_device(pdev)) return -ENODEV; pci_set_master(pdev); dev = init_atmel_card(pdev->irq, pdev->resource[1].start, ATMEL_FW_TYPE_506, &pdev->dev, NULL, NULL); if (!dev) return 
-ENODEV; pci_set_drvdata(pdev, dev); return 0; } static void __devexit atmel_pci_remove(struct pci_dev *pdev) { stop_atmel_card(pci_get_drvdata(pdev)); } static int __init atmel_init_module(void) { return pci_register_driver(&atmel_driver); } static void __exit atmel_cleanup_module(void) { pci_unregister_driver(&atmel_driver); } module_init(atmel_init_module); module_exit(atmel_cleanup_module);
gpl-2.0
sunny256/linux
drivers/mtd/nand/fsmc_nand.c
56
30829
/* * drivers/mtd/nand/fsmc_nand.c * * ST Microelectronics * Flexible Static Memory Controller (FSMC) * Driver for NAND portions * * Copyright © 2010 ST Microelectronics * Vipin Kumar <vipin.kumar@st.com> * Ashish Priyadarshi * * Based on drivers/mtd/nand/nomadik_nand.c * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/dmaengine.h> #include <linux/dma-direction.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/resource.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> #include <linux/mtd/nand_ecc.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/mtd/partitions.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/amba/bus.h> #include <mtd/mtd-abi.h> /* fsmc controller registers for NOR flash */ #define CTRL 0x0 /* ctrl register definitions */ #define BANK_ENABLE (1 << 0) #define MUXED (1 << 1) #define NOR_DEV (2 << 2) #define WIDTH_8 (0 << 4) #define WIDTH_16 (1 << 4) #define RSTPWRDWN (1 << 6) #define WPROT (1 << 7) #define WRT_ENABLE (1 << 12) #define WAIT_ENB (1 << 13) #define CTRL_TIM 0x4 /* ctrl_tim register definitions */ #define FSMC_NOR_BANK_SZ 0x8 #define FSMC_NOR_REG_SIZE 0x40 #define FSMC_NOR_REG(base, bank, reg) (base + \ FSMC_NOR_BANK_SZ * (bank) + \ reg) /* fsmc controller registers for NAND flash */ #define PC 0x00 /* pc register definitions */ #define FSMC_RESET (1 << 0) #define FSMC_WAITON (1 << 1) #define FSMC_ENABLE (1 << 2) #define FSMC_DEVTYPE_NAND (1 << 3) #define FSMC_DEVWID_8 (0 << 4) #define FSMC_DEVWID_16 (1 << 4) #define FSMC_ECCEN (1 << 6) #define FSMC_ECCPLEN_512 (0 << 7) #define FSMC_ECCPLEN_256 (1 << 7) #define FSMC_TCLR_1 (1) #define FSMC_TCLR_SHIFT (9) #define 
FSMC_TCLR_MASK (0xF) #define FSMC_TAR_1 (1) #define FSMC_TAR_SHIFT (13) #define FSMC_TAR_MASK (0xF) #define STS 0x04 /* sts register definitions */ #define FSMC_CODE_RDY (1 << 15) #define COMM 0x08 /* comm register definitions */ #define FSMC_TSET_0 0 #define FSMC_TSET_SHIFT 0 #define FSMC_TSET_MASK 0xFF #define FSMC_TWAIT_6 6 #define FSMC_TWAIT_SHIFT 8 #define FSMC_TWAIT_MASK 0xFF #define FSMC_THOLD_4 4 #define FSMC_THOLD_SHIFT 16 #define FSMC_THOLD_MASK 0xFF #define FSMC_THIZ_1 1 #define FSMC_THIZ_SHIFT 24 #define FSMC_THIZ_MASK 0xFF #define ATTRIB 0x0C #define IOATA 0x10 #define ECC1 0x14 #define ECC2 0x18 #define ECC3 0x1C #define FSMC_NAND_BANK_SZ 0x20 #define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \ (FSMC_NAND_BANK_SZ * (bank)) + \ reg) #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) struct fsmc_nand_timings { uint8_t tclr; uint8_t tar; uint8_t thiz; uint8_t thold; uint8_t twait; uint8_t tset; }; enum access_mode { USE_DMA_ACCESS = 1, USE_WORD_ACCESS, }; /** * struct fsmc_nand_data - structure for FSMC NAND device state * * @pid: Part ID on the AMBA PrimeCell format * @mtd: MTD info for a NAND flash. * @nand: Chip related info for a NAND flash. * @partitions: Partition info for a NAND Flash. * @nr_partitions: Total number of partition of a NAND flash. * * @bank: Bank number for probed device. * @clk: Clock structure for FSMC. * * @read_dma_chan: DMA channel for read access * @write_dma_chan: DMA channel for write access to NAND * @dma_access_complete: Completion structure * * @data_pa: NAND Physical port for Data. * @data_va: NAND port for Data. * @cmd_va: NAND port for Command. * @addr_va: NAND port for Address. * @regs_va: FSMC regs base address. 
*/ struct fsmc_nand_data { u32 pid; struct nand_chip nand; unsigned int bank; struct device *dev; enum access_mode mode; struct clk *clk; /* DMA related objects */ struct dma_chan *read_dma_chan; struct dma_chan *write_dma_chan; struct completion dma_access_complete; struct fsmc_nand_timings *dev_timings; dma_addr_t data_pa; void __iomem *data_va; void __iomem *cmd_va; void __iomem *addr_va; void __iomem *regs_va; }; static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); if (section >= chip->ecc.steps) return -ERANGE; oobregion->offset = (section * 16) + 2; oobregion->length = 3; return 0; } static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); if (section >= chip->ecc.steps) return -ERANGE; oobregion->offset = (section * 16) + 8; if (section < chip->ecc.steps - 1) oobregion->length = 8; else oobregion->length = mtd->oobsize - oobregion->offset; return 0; } static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = { .ecc = fsmc_ecc1_ooblayout_ecc, .free = fsmc_ecc1_ooblayout_free, }; /* * ECC placement definitions in oobfree type format. * There are 13 bytes of ecc for every 512 byte block and it has to be read * consecutively and immediately after the 512 byte data block for hardware to * generate the error bit offsets in 512 byte data. 
*/ static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); if (section >= chip->ecc.steps) return -ERANGE; oobregion->length = chip->ecc.bytes; if (!section && mtd->writesize <= 512) oobregion->offset = 0; else oobregion->offset = (section * 16) + 2; return 0; } static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); if (section >= chip->ecc.steps) return -ERANGE; oobregion->offset = (section * 16) + 15; if (section < chip->ecc.steps - 1) oobregion->length = 3; else oobregion->length = mtd->oobsize - oobregion->offset; return 0; } static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = { .ecc = fsmc_ecc4_ooblayout_ecc, .free = fsmc_ecc4_ooblayout_free, }; static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd) { return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand); } /* * fsmc_cmd_ctrl - For facilitaing Hardware access * This routine allows hardware specific access to control-lines(ALE,CLE) */ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd_to_nand(mtd); struct fsmc_nand_data *host = mtd_to_fsmc(mtd); void __iomem *regs = host->regs_va; unsigned int bank = host->bank; if (ctrl & NAND_CTRL_CHANGE) { u32 pc; if (ctrl & NAND_CLE) { this->IO_ADDR_R = host->cmd_va; this->IO_ADDR_W = host->cmd_va; } else if (ctrl & NAND_ALE) { this->IO_ADDR_R = host->addr_va; this->IO_ADDR_W = host->addr_va; } else { this->IO_ADDR_R = host->data_va; this->IO_ADDR_W = host->data_va; } pc = readl(FSMC_NAND_REG(regs, bank, PC)); if (ctrl & NAND_NCE) pc |= FSMC_ENABLE; else pc &= ~FSMC_ENABLE; writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC)); } mb(); if (cmd != NAND_CMD_NONE) writeb_relaxed(cmd, this->IO_ADDR_W); } /* * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine * * This routine 
initializes timing parameters related to NAND memory access in * FSMC registers */ static void fsmc_nand_setup(struct fsmc_nand_data *host, struct fsmc_nand_timings *tims) { uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON; uint32_t tclr, tar, thiz, thold, twait, tset; unsigned int bank = host->bank; void __iomem *regs = host->regs_va; tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT; tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT; thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT; thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT; twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT; tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT; if (host->nand.options & NAND_BUSWIDTH_16) writel_relaxed(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC)); else writel_relaxed(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC)); writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar, FSMC_NAND_REG(regs, bank, PC)); writel_relaxed(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM)); writel_relaxed(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB)); } static int fsmc_calc_timings(struct fsmc_nand_data *host, const struct nand_sdr_timings *sdrt, struct fsmc_nand_timings *tims) { unsigned long hclk = clk_get_rate(host->clk); unsigned long hclkn = NSEC_PER_SEC / hclk; uint32_t thiz, thold, twait, tset; if (sdrt->tRC_min < 30000) return -EOPNOTSUPP; tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1; if (tims->tar > FSMC_TAR_MASK) tims->tar = FSMC_TAR_MASK; tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1; if (tims->tclr > FSMC_TCLR_MASK) tims->tclr = FSMC_TCLR_MASK; thiz = sdrt->tCS_min - sdrt->tWP_min; tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn); thold = sdrt->tDH_min; if (thold < sdrt->tCH_min) thold = sdrt->tCH_min; if (thold < sdrt->tCLH_min) thold = sdrt->tCLH_min; if (thold < sdrt->tWH_min) thold = sdrt->tWH_min; if (thold < sdrt->tALH_min) thold = sdrt->tALH_min; if 
(thold < sdrt->tREH_min) thold = sdrt->tREH_min; tims->thold = DIV_ROUND_UP(thold / 1000, hclkn); if (tims->thold == 0) tims->thold = 1; else if (tims->thold > FSMC_THOLD_MASK) tims->thold = FSMC_THOLD_MASK; twait = max(sdrt->tRP_min, sdrt->tWP_min); tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; if (tims->twait == 0) tims->twait = 1; else if (tims->twait > FSMC_TWAIT_MASK) tims->twait = FSMC_TWAIT_MASK; tset = max(sdrt->tCS_min - sdrt->tWP_min, sdrt->tCEA_max - sdrt->tREA_max); tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1; if (tims->tset == 0) tims->tset = 1; else if (tims->tset > FSMC_TSET_MASK) tims->tset = FSMC_TSET_MASK; return 0; } static int fsmc_setup_data_interface(struct mtd_info *mtd, int csline, const struct nand_data_interface *conf) { struct nand_chip *nand = mtd_to_nand(mtd); struct fsmc_nand_data *host = nand_get_controller_data(nand); struct fsmc_nand_timings tims; const struct nand_sdr_timings *sdrt; int ret; sdrt = nand_get_sdr_timings(conf); if (IS_ERR(sdrt)) return PTR_ERR(sdrt); ret = fsmc_calc_timings(host, sdrt, &tims); if (ret) return ret; if (csline == NAND_DATA_IFACE_CHECK_ONLY) return 0; fsmc_nand_setup(host, &tims); return 0; } /* * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers */ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode) { struct fsmc_nand_data *host = mtd_to_fsmc(mtd); void __iomem *regs = host->regs_va; uint32_t bank = host->bank; writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256, FSMC_NAND_REG(regs, bank, PC)); writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN, FSMC_NAND_REG(regs, bank, PC)); writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN, FSMC_NAND_REG(regs, bank, PC)); } /* * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by * FSMC. 
ECC is 13 bytes for 512 bytes of data (supports error correction up to * max of 8-bits) */ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data, uint8_t *ecc) { struct fsmc_nand_data *host = mtd_to_fsmc(mtd); void __iomem *regs = host->regs_va; uint32_t bank = host->bank; uint32_t ecc_tmp; unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; do { if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY) break; else cond_resched(); } while (!time_after_eq(jiffies, deadline)); if (time_after_eq(jiffies, deadline)) { dev_err(host->dev, "calculate ecc timed out\n"); return -ETIMEDOUT; } ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); ecc[0] = (uint8_t) (ecc_tmp >> 0); ecc[1] = (uint8_t) (ecc_tmp >> 8); ecc[2] = (uint8_t) (ecc_tmp >> 16); ecc[3] = (uint8_t) (ecc_tmp >> 24); ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2)); ecc[4] = (uint8_t) (ecc_tmp >> 0); ecc[5] = (uint8_t) (ecc_tmp >> 8); ecc[6] = (uint8_t) (ecc_tmp >> 16); ecc[7] = (uint8_t) (ecc_tmp >> 24); ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3)); ecc[8] = (uint8_t) (ecc_tmp >> 0); ecc[9] = (uint8_t) (ecc_tmp >> 8); ecc[10] = (uint8_t) (ecc_tmp >> 16); ecc[11] = (uint8_t) (ecc_tmp >> 24); ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS)); ecc[12] = (uint8_t) (ecc_tmp >> 16); return 0; } /* * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by * FSMC. 
ECC is 3 bytes for 512 bytes of data (supports error correction up to * max of 1-bit) */ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data, uint8_t *ecc) { struct fsmc_nand_data *host = mtd_to_fsmc(mtd); void __iomem *regs = host->regs_va; uint32_t bank = host->bank; uint32_t ecc_tmp; ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); ecc[0] = (uint8_t) (ecc_tmp >> 0); ecc[1] = (uint8_t) (ecc_tmp >> 8); ecc[2] = (uint8_t) (ecc_tmp >> 16); return 0; } /* Count the number of 0's in buff upto a max of max_bits */ static int count_written_bits(uint8_t *buff, int size, int max_bits) { int k, written_bits = 0; for (k = 0; k < size; k++) { written_bits += hweight8(~buff[k]); if (written_bits > max_bits) break; } return written_bits; } static void dma_complete(void *param) { struct fsmc_nand_data *host = param; complete(&host->dma_access_complete); } static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, enum dma_data_direction direction) { struct dma_chan *chan; struct dma_device *dma_dev; struct dma_async_tx_descriptor *tx; dma_addr_t dma_dst, dma_src, dma_addr; dma_cookie_t cookie; unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; int ret; unsigned long time_left; if (direction == DMA_TO_DEVICE) chan = host->write_dma_chan; else if (direction == DMA_FROM_DEVICE) chan = host->read_dma_chan; else return -EINVAL; dma_dev = chan->device; dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); if (direction == DMA_TO_DEVICE) { dma_src = dma_addr; dma_dst = host->data_pa; } else { dma_src = host->data_pa; dma_dst = dma_addr; } tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags); if (!tx) { dev_err(host->dev, "device_prep_dma_memcpy error\n"); ret = -EIO; goto unmap_dma; } tx->callback = dma_complete; tx->callback_param = host; cookie = tx->tx_submit(tx); ret = dma_submit_error(cookie); if (ret) { dev_err(host->dev, "dma_submit_error %d\n", cookie); goto unmap_dma; } 
dma_async_issue_pending(chan); time_left = wait_for_completion_timeout(&host->dma_access_complete, msecs_to_jiffies(3000)); if (time_left == 0) { dmaengine_terminate_all(chan); dev_err(host->dev, "wait_for_completion_timeout\n"); ret = -ETIMEDOUT; goto unmap_dma; } ret = 0; unmap_dma: dma_unmap_single(dma_dev->dev, dma_addr, len, direction); return ret; } /* * fsmc_write_buf - write buffer to chip * @mtd: MTD device structure * @buf: data buffer * @len: number of bytes to write */ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { int i; struct nand_chip *chip = mtd_to_nand(mtd); if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) && IS_ALIGNED(len, sizeof(uint32_t))) { uint32_t *p = (uint32_t *)buf; len = len >> 2; for (i = 0; i < len; i++) writel_relaxed(p[i], chip->IO_ADDR_W); } else { for (i = 0; i < len; i++) writeb_relaxed(buf[i], chip->IO_ADDR_W); } } /* * fsmc_read_buf - read chip data into buffer * @mtd: MTD device structure * @buf: buffer to store date * @len: number of bytes to read */ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { int i; struct nand_chip *chip = mtd_to_nand(mtd); if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) && IS_ALIGNED(len, sizeof(uint32_t))) { uint32_t *p = (uint32_t *)buf; len = len >> 2; for (i = 0; i < len; i++) p[i] = readl_relaxed(chip->IO_ADDR_R); } else { for (i = 0; i < len; i++) buf[i] = readb_relaxed(chip->IO_ADDR_R); } } /* * fsmc_read_buf_dma - read chip data into buffer * @mtd: MTD device structure * @buf: buffer to store date * @len: number of bytes to read */ static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len) { struct fsmc_nand_data *host = mtd_to_fsmc(mtd); dma_xfer(host, buf, len, DMA_FROM_DEVICE); } /* * fsmc_write_buf_dma - write buffer to chip * @mtd: MTD device structure * @buf: data buffer * @len: number of bytes to write */ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf, int len) { struct fsmc_nand_data *host 
= mtd_to_fsmc(mtd); dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); } /* * fsmc_read_page_hwecc * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller expects OOB data read to chip->oob_poi * @page: page number to read * * This routine is needed for fsmc version 8 as reading from NAND chip has to be * performed in a strict sequence as follows: * data(512 byte) -> ecc(13 byte) * After this read, fsmc hardware generates and reports error data bits(up to a * max of 8 bits) */ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { int i, j, s, stat, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; uint8_t *p = buf; uint8_t *ecc_calc = chip->buffers->ecccalc; uint8_t *ecc_code = chip->buffers->ecccode; int off, len, group = 0; /* * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we * end up reading 14 bytes (7 words) from oob. 
The local array is * to maintain word alignment */ uint16_t ecc_oob[7]; uint8_t *oob = (uint8_t *)&ecc_oob[0]; unsigned int max_bitflips = 0; for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); chip->ecc.hwctl(mtd, NAND_ECC_READ); chip->read_buf(mtd, p, eccsize); for (j = 0; j < eccbytes;) { struct mtd_oob_region oobregion; int ret; ret = mtd_ooblayout_ecc(mtd, group++, &oobregion); if (ret) return ret; off = oobregion.offset; len = oobregion.length; /* * length is intentionally kept a higher multiple of 2 * to read at least 13 bytes even in case of 16 bit NAND * devices */ if (chip->options & NAND_BUSWIDTH_16) len = roundup(len, 2); chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); chip->read_buf(mtd, oob + j, len); j += len; } memcpy(&ecc_code[i], oob, chip->ecc.bytes); chip->ecc.calculate(mtd, p, &ecc_calc[i]); stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); if (stat < 0) { mtd->ecc_stats.failed++; } else { mtd->ecc_stats.corrected += stat; max_bitflips = max_t(unsigned int, max_bitflips, stat); } } return max_bitflips; } /* * fsmc_bch8_correct_data * @mtd: mtd info structure * @dat: buffer of read data * @read_ecc: ecc read from device spare area * @calc_ecc: ecc calculated from read data * * calc_ecc is a 104 bit information containing maximum of 8 error * offset informations of 13 bits each in 512 bytes of read data. */ static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) { struct nand_chip *chip = mtd_to_nand(mtd); struct fsmc_nand_data *host = mtd_to_fsmc(mtd); void __iomem *regs = host->regs_va; unsigned int bank = host->bank; uint32_t err_idx[8]; uint32_t num_err, i; uint32_t ecc1, ecc2, ecc3, ecc4; num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF; /* no bit flipping */ if (likely(num_err == 0)) return 0; /* too many errors */ if (unlikely(num_err > 8)) { /* * This is a temporary erase check. 
A newly erased page read * would result in an ecc error because the oob data is also * erased to FF and the calculated ecc for an FF data is not * FF..FF. * This is a workaround to skip performing correction in case * data is FF..FF * * Logic: * For every page, each bit written as 0 is counted until these * number of bits are greater than 8 (the maximum correction * capability of FSMC for each 512 + 13 bytes) */ int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8); int bits_data = count_written_bits(dat, chip->ecc.size, 8); if ((bits_ecc + bits_data) <= 8) { if (bits_data) memset(dat, 0xff, chip->ecc.size); return bits_data; } return -EBADMSG; } /* * ------------------- calc_ecc[] bit wise -----------|--13 bits--| * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--| * * calc_ecc is a 104 bit information containing maximum of 8 error * offset informations of 13 bits each. calc_ecc is copied into a * uint64_t array and error offset indexes are populated in err_idx * array */ ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1)); ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2)); ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3)); ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS)); err_idx[0] = (ecc1 >> 0) & 0x1FFF; err_idx[1] = (ecc1 >> 13) & 0x1FFF; err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F); err_idx[3] = (ecc2 >> 7) & 0x1FFF; err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF); err_idx[5] = (ecc3 >> 1) & 0x1FFF; err_idx[6] = (ecc3 >> 14) & 0x1FFF; err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F); i = 0; while (num_err--) { change_bit(0, (unsigned long *)&err_idx[i]); change_bit(1, (unsigned long *)&err_idx[i]); if (err_idx[i] < chip->ecc.size * 8) { change_bit(err_idx[i], (unsigned long *)dat); i++; } } return i; } static bool filter(struct dma_chan *chan, void *slave) { chan->private = slave; return true; } static int fsmc_nand_probe_config_dt(struct platform_device *pdev, 
struct fsmc_nand_data *host, struct nand_chip *nand) { struct device_node *np = pdev->dev.of_node; u32 val; int ret; nand->options = 0; if (!of_property_read_u32(np, "bank-width", &val)) { if (val == 2) { nand->options |= NAND_BUSWIDTH_16; } else if (val != 1) { dev_err(&pdev->dev, "invalid bank-width %u\n", val); return -EINVAL; } } if (of_get_property(np, "nand-skip-bbtscan", NULL)) nand->options |= NAND_SKIP_BBTSCAN; host->dev_timings = devm_kzalloc(&pdev->dev, sizeof(*host->dev_timings), GFP_KERNEL); if (!host->dev_timings) return -ENOMEM; ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings, sizeof(*host->dev_timings)); if (ret) host->dev_timings = NULL; /* Set default NAND bank to 0 */ host->bank = 0; if (!of_property_read_u32(np, "bank", &val)) { if (val > 3) { dev_err(&pdev->dev, "invalid bank %u\n", val); return -EINVAL; } host->bank = val; } return 0; } /* * fsmc_nand_probe - Probe function * @pdev: platform device structure */ static int __init fsmc_nand_probe(struct platform_device *pdev) { struct fsmc_nand_data *host; struct mtd_info *mtd; struct nand_chip *nand; struct resource *res; dma_cap_mask_t mask; int ret = 0; u32 pid; int i; /* Allocate memory for the device structure (and zero it) */ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); if (!host) return -ENOMEM; nand = &host->nand; ret = fsmc_nand_probe_config_dt(pdev, host, nand); if (ret) return ret; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); host->data_va = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(host->data_va)) return PTR_ERR(host->data_va); host->data_pa = (dma_addr_t)res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr"); host->addr_va = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(host->addr_va)) return PTR_ERR(host->addr_va); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); host->cmd_va = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(host->cmd_va)) return 
PTR_ERR(host->cmd_va); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs"); host->regs_va = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(host->regs_va)) return PTR_ERR(host->regs_va); host->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { dev_err(&pdev->dev, "failed to fetch block clock\n"); return PTR_ERR(host->clk); } ret = clk_prepare_enable(host->clk); if (ret) return ret; /* * This device ID is actually a common AMBA ID as used on the * AMBA PrimeCell bus. However it is not a PrimeCell. */ for (pid = 0, i = 0; i < 4; i++) pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8); host->pid = pid; dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, " "revision %02x, config %02x\n", AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid), AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid)); host->dev = &pdev->dev; if (host->mode == USE_DMA_ACCESS) init_completion(&host->dma_access_complete); /* Link all private pointers */ mtd = nand_to_mtd(&host->nand); nand_set_controller_data(nand, host); nand_set_flash_node(nand, pdev->dev.of_node); mtd->dev.parent = &pdev->dev; nand->IO_ADDR_R = host->data_va; nand->IO_ADDR_W = host->data_va; nand->cmd_ctrl = fsmc_cmd_ctrl; nand->chip_delay = 30; /* * Setup default ECC mode. nand_dt_init() called from nand_scan_ident() * can overwrite this value if the DT provides a different value. 
*/ nand->ecc.mode = NAND_ECC_HW; nand->ecc.hwctl = fsmc_enable_hwecc; nand->ecc.size = 512; nand->badblockbits = 7; switch (host->mode) { case USE_DMA_ACCESS: dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); host->read_dma_chan = dma_request_channel(mask, filter, NULL); if (!host->read_dma_chan) { dev_err(&pdev->dev, "Unable to get read dma channel\n"); goto err_req_read_chnl; } host->write_dma_chan = dma_request_channel(mask, filter, NULL); if (!host->write_dma_chan) { dev_err(&pdev->dev, "Unable to get write dma channel\n"); goto err_req_write_chnl; } nand->read_buf = fsmc_read_buf_dma; nand->write_buf = fsmc_write_buf_dma; break; default: case USE_WORD_ACCESS: nand->read_buf = fsmc_read_buf; nand->write_buf = fsmc_write_buf; break; } if (host->dev_timings) fsmc_nand_setup(host, host->dev_timings); else nand->setup_data_interface = fsmc_setup_data_interface; if (AMBA_REV_BITS(host->pid) >= 8) { nand->ecc.read_page = fsmc_read_page_hwecc; nand->ecc.calculate = fsmc_read_hwecc_ecc4; nand->ecc.correct = fsmc_bch8_correct_data; nand->ecc.bytes = 13; nand->ecc.strength = 8; } /* * Scan to find existence of the device */ ret = nand_scan_ident(mtd, 1, NULL); if (ret) { dev_err(&pdev->dev, "No NAND Device found!\n"); goto err_scan_ident; } if (AMBA_REV_BITS(host->pid) >= 8) { switch (mtd->oobsize) { case 16: case 64: case 128: case 224: case 256: break; default: dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n", mtd->oobsize); ret = -EINVAL; goto err_probe; } mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops); } else { switch (nand->ecc.mode) { case NAND_ECC_HW: dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n"); nand->ecc.calculate = fsmc_read_hwecc_ecc1; nand->ecc.correct = nand_correct_data; nand->ecc.bytes = 3; nand->ecc.strength = 1; break; case NAND_ECC_SOFT: if (nand->ecc.algo == NAND_ECC_BCH) { dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n"); break; } case NAND_ECC_ON_DIE: break; default: dev_err(&pdev->dev, "Unsupported ECC mode!\n"); goto 
err_probe; } /* * Don't set layout for BCH4 SW ECC. This will be * generated later in nand_bch_init() later. */ if (nand->ecc.mode == NAND_ECC_HW) { switch (mtd->oobsize) { case 16: case 64: case 128: mtd_set_ooblayout(mtd, &fsmc_ecc1_ooblayout_ops); break; default: dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n", mtd->oobsize); ret = -EINVAL; goto err_probe; } } } /* Second stage of scan to fill MTD data-structures */ ret = nand_scan_tail(mtd); if (ret) goto err_probe; mtd->name = "nand"; ret = mtd_device_register(mtd, NULL, 0); if (ret) goto err_probe; platform_set_drvdata(pdev, host); dev_info(&pdev->dev, "FSMC NAND driver registration successful\n"); return 0; err_probe: err_scan_ident: if (host->mode == USE_DMA_ACCESS) dma_release_channel(host->write_dma_chan); err_req_write_chnl: if (host->mode == USE_DMA_ACCESS) dma_release_channel(host->read_dma_chan); err_req_read_chnl: clk_disable_unprepare(host->clk); return ret; } /* * Clean up routine */ static int fsmc_nand_remove(struct platform_device *pdev) { struct fsmc_nand_data *host = platform_get_drvdata(pdev); if (host) { nand_release(nand_to_mtd(&host->nand)); if (host->mode == USE_DMA_ACCESS) { dma_release_channel(host->write_dma_chan); dma_release_channel(host->read_dma_chan); } clk_disable_unprepare(host->clk); } return 0; } #ifdef CONFIG_PM_SLEEP static int fsmc_nand_suspend(struct device *dev) { struct fsmc_nand_data *host = dev_get_drvdata(dev); if (host) clk_disable_unprepare(host->clk); return 0; } static int fsmc_nand_resume(struct device *dev) { struct fsmc_nand_data *host = dev_get_drvdata(dev); if (host) { clk_prepare_enable(host->clk); if (host->dev_timings) fsmc_nand_setup(host, host->dev_timings); } return 0; } #endif static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume); static const struct of_device_id fsmc_nand_id_table[] = { { .compatible = "st,spear600-fsmc-nand" }, { .compatible = "stericsson,fsmc-nand" }, {} }; MODULE_DEVICE_TABLE(of, 
fsmc_nand_id_table); static struct platform_driver fsmc_nand_driver = { .remove = fsmc_nand_remove, .driver = { .name = "fsmc-nand", .of_match_table = fsmc_nand_id_table, .pm = &fsmc_nand_pm_ops, }, }; module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi"); MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
gpl-2.0
samno1607/XyZ
drivers/video/msm/mdss/mdss_mdp_util.c
56
9311
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/android_pmem.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/ion.h> #include <linux/iommu.h> #include <linux/msm_kgsl.h> #include <linux/spinlock.h> #include <linux/types.h> #include <mach/iommu_domains.h> #include "mdss_fb.h" #include "mdss_mdp.h" #include "mdss_mdp_formats.h" enum { MDP_INTR_VSYNC_INTF_0, MDP_INTR_VSYNC_INTF_1, MDP_INTR_VSYNC_INTF_2, MDP_INTR_VSYNC_INTF_3, MDP_INTR_PING_PONG_0, MDP_INTR_PING_PONG_1, MDP_INTR_PING_PONG_2, MDP_INTR_WB_0, MDP_INTR_WB_1, MDP_INTR_WB_2, MDP_INTR_MAX, }; struct intr_callback { void (*func)(void *); void *arg; }; struct intr_callback mdp_intr_cb[MDP_INTR_MAX]; static DEFINE_SPINLOCK(mdss_mdp_intr_lock); static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num) { int index = -1; switch (intr_type) { case MDSS_MDP_IRQ_INTF_VSYNC: index = MDP_INTR_VSYNC_INTF_0 + (intf_num - MDSS_MDP_INTF0); break; case MDSS_MDP_IRQ_PING_PONG_COMP: index = MDP_INTR_PING_PONG_0 + intf_num; break; case MDSS_MDP_IRQ_WB_ROT_COMP: index = MDP_INTR_WB_0 + intf_num; break; case MDSS_MDP_IRQ_WB_WFD: index = MDP_INTR_WB_2 + intf_num; break; } return index; } int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num, void (*fnc_ptr)(void *), void *arg) { unsigned long flags; int index, ret; index = mdss_mdp_intr2index(intr_type, intf_num); if (index < 0) { pr_warn("invalid intr type=%u intf_num=%u\n", intr_type, intf_num); return 
-EINVAL; } spin_lock_irqsave(&mdss_mdp_intr_lock, flags); if (!mdp_intr_cb[index].func) { mdp_intr_cb[index].func = fnc_ptr; mdp_intr_cb[index].arg = arg; ret = 0; } else { ret = -EBUSY; } spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags); return ret; } static inline void mdss_mdp_intr_done(int index) { void (*fnc)(void *); void *arg; spin_lock(&mdss_mdp_intr_lock); fnc = mdp_intr_cb[index].func; arg = mdp_intr_cb[index].arg; spin_unlock(&mdss_mdp_intr_lock); if (fnc) fnc(arg); } irqreturn_t mdss_mdp_isr(int irq, void *ptr) { u32 isr, mask; isr = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_STATUS); pr_debug("isr=%x\n", isr); if (isr == 0) goto done; mask = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_EN); MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, isr); isr &= mask; if (isr == 0) goto done; if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE) mdss_mdp_intr_done(MDP_INTR_PING_PONG_0); if (isr & MDSS_MDP_INTR_PING_PONG_1_DONE) mdss_mdp_intr_done(MDP_INTR_PING_PONG_1); if (isr & MDSS_MDP_INTR_PING_PONG_2_DONE) mdss_mdp_intr_done(MDP_INTR_PING_PONG_2); if (isr & MDSS_MDP_INTR_INTF_0_VSYNC) mdss_mdp_intr_done(MDP_INTR_VSYNC_INTF_0); if (isr & MDSS_MDP_INTR_INTF_1_VSYNC) mdss_mdp_intr_done(MDP_INTR_VSYNC_INTF_1); if (isr & MDSS_MDP_INTR_INTF_2_VSYNC) mdss_mdp_intr_done(MDP_INTR_VSYNC_INTF_2); if (isr & MDSS_MDP_INTR_INTF_3_VSYNC) mdss_mdp_intr_done(MDP_INTR_VSYNC_INTF_3); if (isr & MDSS_MDP_INTR_WB_0_DONE) mdss_mdp_intr_done(MDP_INTR_WB_0); if (isr & MDSS_MDP_INTR_WB_1_DONE) mdss_mdp_intr_done(MDP_INTR_WB_1); if (isr & MDSS_MDP_INTR_WB_2_DONE) mdss_mdp_intr_done(MDP_INTR_WB_2); done: return IRQ_HANDLED; } struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format) { if (format < MDP_IMGTYPE_LIMIT) { struct mdss_mdp_format_params *fmt = NULL; int i; for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_map); i++) { fmt = &mdss_mdp_format_map[i]; if (format == fmt->format) return fmt; } } return NULL; } int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h, struct mdss_mdp_plane_sizes *ps) { struct 
mdss_mdp_format_params *fmt; int i; if (ps == NULL) return -EINVAL; if ((w > MAX_IMG_WIDTH) || (h > MAX_IMG_HEIGHT)) return -ERANGE; fmt = mdss_mdp_get_format_params(format); if (!fmt) return -EINVAL; memset(ps, 0, sizeof(struct mdss_mdp_plane_sizes)); if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) { u32 bpp = fmt->bpp; ps->num_planes = 1; ps->plane_size[0] = w * h * bpp; ps->ystride[0] = w * bpp; } else { u8 hmap[] = { 1, 2, 1, 2 }; u8 vmap[] = { 1, 1, 2, 2 }; u8 horiz, vert, stride_align; horiz = hmap[fmt->chroma_sample]; vert = vmap[fmt->chroma_sample]; switch (format) { case MDP_Y_CR_CB_GH2V2: stride_align = 16; break; case MDP_Y_CBCR_H2V2_VENUS: stride_align = 32; break; default: stride_align = 1; break; } ps->ystride[0] = ALIGN(w, stride_align); ps->ystride[1] = ALIGN(w / horiz, stride_align); ps->plane_size[0] = ps->ystride[0] * h; ps->plane_size[1] = ps->ystride[1] * (h / vert); if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) { ps->num_planes = 2; ps->plane_size[1] *= 2; ps->ystride[1] *= 2; } else { /* planar */ ps->num_planes = 3; ps->plane_size[2] = ps->plane_size[1]; ps->ystride[2] = ps->ystride[1]; } } for (i = 0; i < ps->num_planes; i++) ps->total_size += ps->plane_size[i]; return 0; } int mdss_mdp_data_check(struct mdss_mdp_data *data, struct mdss_mdp_plane_sizes *ps) { if (!ps) return 0; if (!data || data->num_planes == 0) return -ENOMEM; if (data->bwc_enabled) { return -EPERM; /* not supported */ } else { struct mdss_mdp_img_data *prev, *curr; int i; pr_debug("srcp0=%x len=%u frame_size=%u\n", data->p[0].addr, data->p[0].len, ps->total_size); for (i = 0; i < ps->num_planes; i++) { curr = &data->p[i]; if (i >= data->num_planes) { u32 psize = ps->plane_size[i-1]; prev = &data->p[i-1]; if (prev->len > psize) { curr->len = prev->len - psize; prev->len = psize; } curr->addr = prev->addr + psize; } if (curr->len < ps->plane_size[i]) { pr_err("insufficient mem=%u p=%d len=%u\n", curr->len, i, ps->plane_size[i]); return -ENOMEM; } 
pr_debug("plane[%d] addr=%x len=%u\n", i, curr->addr, curr->len); } data->num_planes = ps->num_planes; } return 0; } int mdss_mdp_put_img(struct mdss_mdp_img_data *data) { struct ion_client *iclient = mdss_get_ionclient(); if (data->flags & MDP_MEMORY_ID_TYPE_FB) { pr_debug("fb mem buf=0x%x\n", data->addr); fput_light(data->srcp_file, data->p_need); data->srcp_file = NULL; } else if (data->srcp_file) { pr_debug("pmem buf=0x%x\n", data->addr); put_pmem_file(data->srcp_file); data->srcp_file = NULL; } else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) { pr_debug("ion hdl=%p buf=0x%x\n", data->srcp_ihdl, data->addr); if (is_mdss_iommu_attached()) ion_unmap_iommu(iclient, data->srcp_ihdl, mdss_get_iommu_domain(), 0); ion_free(iclient, data->srcp_ihdl); data->srcp_ihdl = NULL; } else { return -ENOMEM; } return 0; } int mdss_mdp_get_img(struct msmfb_data *img, struct mdss_mdp_img_data *data) { struct file *file; int ret = -EINVAL; int fb_num; unsigned long *start, *len; struct ion_client *iclient = mdss_get_ionclient(); start = (unsigned long *) &data->addr; len = (unsigned long *) &data->len; data->flags = img->flags; data->p_need = 0; if (img->flags & MDP_BLIT_SRC_GEM) { data->srcp_file = NULL; ret = kgsl_gem_obj_addr(img->memory_id, (int) img->priv, start, len); } else if (img->flags & MDP_MEMORY_ID_TYPE_FB) { file = fget_light(img->memory_id, &data->p_need); if (file == NULL) { pr_err("invalid framebuffer file (%d)\n", img->memory_id); return -EINVAL; } data->srcp_file = file; if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) { fb_num = MINOR(file->f_dentry->d_inode->i_rdev); ret = mdss_fb_get_phys_info(start, len, fb_num); if (ret) pr_err("mdss_fb_get_phys_info() failed\n"); } else { pr_err("invalid FB_MAJOR\n"); ret = -1; } } else if (iclient) { data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id); if (IS_ERR_OR_NULL(data->srcp_ihdl)) { pr_err("error on ion_import_fd\n"); ret = PTR_ERR(data->srcp_ihdl); data->srcp_ihdl = NULL; return ret; } if 
(is_mdss_iommu_attached()) { ret = ion_map_iommu(iclient, data->srcp_ihdl, mdss_get_iommu_domain(), 0, SZ_4K, 0, start, len, 0, ION_IOMMU_UNMAP_DELAYED); } else { ret = ion_phys(iclient, data->srcp_ihdl, start, (size_t *) len); } if (IS_ERR_VALUE(ret)) { ion_free(iclient, data->srcp_ihdl); pr_err("failed to map ion handle (%d)\n", ret); return ret; } } else { unsigned long vstart; ret = get_pmem_file(img->memory_id, start, &vstart, len, &data->srcp_file); } if (!ret && (img->offset < data->len)) { data->addr += img->offset; data->len -= img->offset; pr_debug("mem=%d ihdl=%p buf=0x%x len=0x%x\n", img->memory_id, data->srcp_ihdl, data->addr, data->len); } else { return -EINVAL; } return ret; }
gpl-2.0
lwalkera/spark-linux
arch/arm/mach-davinci/devices-da8xx.c
56
16428
/* * DA8XX/OMAP L1XX platform device data * * Copyright (c) 2007-2009, MontaVista Software, Inc. <source@mvista.com> * Derived from code that was: * Copyright (C) 2006 Komal Shah <komal_shah802003@yahoo.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/serial_8250.h> #include <mach/cputype.h> #include <mach/common.h> #include <mach/time.h> #include <mach/da8xx.h> #include <mach/cpuidle.h> #include "clock.h" #define DA8XX_TPCC_BASE 0x01c00000 #define DA850_MMCSD1_BASE 0x01e1b000 #define DA850_TPCC1_BASE 0x01e30000 #define DA8XX_TPTC0_BASE 0x01c08000 #define DA8XX_TPTC1_BASE 0x01c08400 #define DA850_TPTC2_BASE 0x01e38000 #define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */ #define DA8XX_I2C0_BASE 0x01c22000 #define DA8XX_RTC_BASE 0x01C23000 #define DA8XX_EMAC_CPPI_PORT_BASE 0x01e20000 #define DA8XX_EMAC_CPGMACSS_BASE 0x01e22000 #define DA8XX_EMAC_CPGMAC_BASE 0x01e23000 #define DA8XX_EMAC_MDIO_BASE 0x01e24000 #define DA8XX_GPIO_BASE 0x01e26000 #define DA8XX_I2C1_BASE 0x01e28000 #define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000 #define DA8XX_EMAC_MOD_REG_OFFSET 0x2000 #define DA8XX_EMAC_RAM_OFFSET 0x0000 #define DA8XX_EMAC_CTRL_RAM_SIZE SZ_8K void __iomem *da8xx_syscfg0_base; void __iomem *da8xx_syscfg1_base; static struct plat_serial8250_port da8xx_serial_pdata[] = { { .mapbase = DA8XX_UART0_BASE, .irq = IRQ_DA8XX_UARTINT0, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .mapbase = DA8XX_UART1_BASE, .irq = IRQ_DA8XX_UARTINT1, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .mapbase = DA8XX_UART2_BASE, .irq = IRQ_DA8XX_UARTINT2, .flags = UPF_BOOT_AUTOCONF | 
UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, .regshift = 2, }, { .flags = 0, }, }; struct platform_device da8xx_serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = da8xx_serial_pdata, }, }; static const s8 da8xx_queue_tc_mapping[][2] = { /* {event queue no, TC no} */ {0, 0}, {1, 1}, {-1, -1} }; static const s8 da8xx_queue_priority_mapping[][2] = { /* {event queue no, Priority} */ {0, 3}, {1, 7}, {-1, -1} }; static const s8 da850_queue_tc_mapping[][2] = { /* {event queue no, TC no} */ {0, 0}, {-1, -1} }; static const s8 da850_queue_priority_mapping[][2] = { /* {event queue no, Priority} */ {0, 3}, {-1, -1} }; static struct edma_soc_info da830_edma_cc0_info = { .n_channel = 32, .n_region = 4, .n_slot = 128, .n_tc = 2, .n_cc = 1, .queue_tc_mapping = da8xx_queue_tc_mapping, .queue_priority_mapping = da8xx_queue_priority_mapping, }; static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = { &da830_edma_cc0_info, }; static struct edma_soc_info da850_edma_cc_info[] = { { .n_channel = 32, .n_region = 4, .n_slot = 128, .n_tc = 2, .n_cc = 1, .queue_tc_mapping = da8xx_queue_tc_mapping, .queue_priority_mapping = da8xx_queue_priority_mapping, }, { .n_channel = 32, .n_region = 4, .n_slot = 128, .n_tc = 1, .n_cc = 1, .queue_tc_mapping = da850_queue_tc_mapping, .queue_priority_mapping = da850_queue_priority_mapping, }, }; static struct edma_soc_info *da850_edma_info[EDMA_MAX_CC] = { &da850_edma_cc_info[0], &da850_edma_cc_info[1], }; static struct resource da830_edma_resources[] = { { .name = "edma_cc0", .start = DA8XX_TPCC_BASE, .end = DA8XX_TPCC_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc0", .start = DA8XX_TPTC0_BASE, .end = DA8XX_TPTC0_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc1", .start = DA8XX_TPTC1_BASE, .end = DA8XX_TPTC1_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma0", .start = IRQ_DA8XX_CCINT0, .flags = IORESOURCE_IRQ, }, { .name = "edma0_err", .start = 
IRQ_DA8XX_CCERRINT, .flags = IORESOURCE_IRQ, }, }; static struct resource da850_edma_resources[] = { { .name = "edma_cc0", .start = DA8XX_TPCC_BASE, .end = DA8XX_TPCC_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc0", .start = DA8XX_TPTC0_BASE, .end = DA8XX_TPTC0_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc1", .start = DA8XX_TPTC1_BASE, .end = DA8XX_TPTC1_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_cc1", .start = DA850_TPCC1_BASE, .end = DA850_TPCC1_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma_tc2", .start = DA850_TPTC2_BASE, .end = DA850_TPTC2_BASE + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .name = "edma0", .start = IRQ_DA8XX_CCINT0, .flags = IORESOURCE_IRQ, }, { .name = "edma0_err", .start = IRQ_DA8XX_CCERRINT, .flags = IORESOURCE_IRQ, }, { .name = "edma1", .start = IRQ_DA850_CCINT1, .flags = IORESOURCE_IRQ, }, { .name = "edma1_err", .start = IRQ_DA850_CCERRINT1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da830_edma_device = { .name = "edma", .id = -1, .dev = { .platform_data = da830_edma_info, }, .num_resources = ARRAY_SIZE(da830_edma_resources), .resource = da830_edma_resources, }; static struct platform_device da850_edma_device = { .name = "edma", .id = -1, .dev = { .platform_data = da850_edma_info, }, .num_resources = ARRAY_SIZE(da850_edma_resources), .resource = da850_edma_resources, }; int __init da830_register_edma(struct edma_rsv_info *rsv) { da830_edma_cc0_info.rsv = rsv; return platform_device_register(&da830_edma_device); } int __init da850_register_edma(struct edma_rsv_info *rsv[2]) { if (rsv) { da850_edma_cc_info[0].rsv = rsv[0]; da850_edma_cc_info[1].rsv = rsv[1]; } return platform_device_register(&da850_edma_device); } static struct resource da8xx_i2c_resources0[] = { { .start = DA8XX_I2C0_BASE, .end = DA8XX_I2C0_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_I2CINT0, .end = IRQ_DA8XX_I2CINT0, .flags = IORESOURCE_IRQ, }, }; static struct 
platform_device da8xx_i2c_device0 = { .name = "i2c_davinci", .id = 1, .num_resources = ARRAY_SIZE(da8xx_i2c_resources0), .resource = da8xx_i2c_resources0, }; static struct resource da8xx_i2c_resources1[] = { { .start = DA8XX_I2C1_BASE, .end = DA8XX_I2C1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_I2CINT1, .end = IRQ_DA8XX_I2CINT1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_i2c_device1 = { .name = "i2c_davinci", .id = 2, .num_resources = ARRAY_SIZE(da8xx_i2c_resources1), .resource = da8xx_i2c_resources1, }; int __init da8xx_register_i2c(int instance, struct davinci_i2c_platform_data *pdata) { struct platform_device *pdev; if (instance == 0) pdev = &da8xx_i2c_device0; else if (instance == 1) pdev = &da8xx_i2c_device1; else return -EINVAL; pdev->dev.platform_data = pdata; return platform_device_register(pdev); } static struct resource da8xx_watchdog_resources[] = { { .start = DA8XX_WDOG_BASE, .end = DA8XX_WDOG_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; struct platform_device da8xx_wdt_device = { .name = "watchdog", .id = -1, .num_resources = ARRAY_SIZE(da8xx_watchdog_resources), .resource = da8xx_watchdog_resources, }; int __init da8xx_register_watchdog(void) { return platform_device_register(&da8xx_wdt_device); } static struct resource da8xx_emac_resources[] = { { .start = DA8XX_EMAC_CPPI_PORT_BASE, .end = DA8XX_EMAC_CPPI_PORT_BASE + SZ_16K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_C0_RX_THRESH_PULSE, .end = IRQ_DA8XX_C0_RX_THRESH_PULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_C0_RX_PULSE, .end = IRQ_DA8XX_C0_RX_PULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_C0_TX_PULSE, .end = IRQ_DA8XX_C0_TX_PULSE, .flags = IORESOURCE_IRQ, }, { .start = IRQ_DA8XX_C0_MISC_PULSE, .end = IRQ_DA8XX_C0_MISC_PULSE, .flags = IORESOURCE_IRQ, }, }; struct emac_platform_data da8xx_emac_pdata = { .ctrl_reg_offset = DA8XX_EMAC_CTRL_REG_OFFSET, .ctrl_mod_reg_offset = DA8XX_EMAC_MOD_REG_OFFSET, .ctrl_ram_offset = 
DA8XX_EMAC_RAM_OFFSET, .ctrl_ram_size = DA8XX_EMAC_CTRL_RAM_SIZE, .version = EMAC_VERSION_2, }; static struct platform_device da8xx_emac_device = { .name = "davinci_emac", .id = 1, .dev = { .platform_data = &da8xx_emac_pdata, }, .num_resources = ARRAY_SIZE(da8xx_emac_resources), .resource = da8xx_emac_resources, }; static struct resource da8xx_mdio_resources[] = { { .start = DA8XX_EMAC_MDIO_BASE, .end = DA8XX_EMAC_MDIO_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device da8xx_mdio_device = { .name = "davinci_mdio", .id = 0, .num_resources = ARRAY_SIZE(da8xx_mdio_resources), .resource = da8xx_mdio_resources, }; int __init da8xx_register_emac(void) { int ret; ret = platform_device_register(&da8xx_mdio_device); if (ret < 0) return ret; ret = platform_device_register(&da8xx_emac_device); if (ret < 0) return ret; ret = clk_add_alias(NULL, dev_name(&da8xx_mdio_device.dev), NULL, &da8xx_emac_device.dev); return ret; } static struct resource da830_mcasp1_resources[] = { { .name = "mcasp1", .start = DAVINCI_DA830_MCASP1_REG_BASE, .end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, }, /* TX event */ { .start = DAVINCI_DA830_DMA_MCASP1_AXEVT, .end = DAVINCI_DA830_DMA_MCASP1_AXEVT, .flags = IORESOURCE_DMA, }, /* RX event */ { .start = DAVINCI_DA830_DMA_MCASP1_AREVT, .end = DAVINCI_DA830_DMA_MCASP1_AREVT, .flags = IORESOURCE_DMA, }, }; static struct platform_device da830_mcasp1_device = { .name = "davinci-mcasp", .id = 1, .num_resources = ARRAY_SIZE(da830_mcasp1_resources), .resource = da830_mcasp1_resources, }; static struct resource da850_mcasp_resources[] = { { .name = "mcasp", .start = DAVINCI_DA8XX_MCASP0_REG_BASE, .end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1, .flags = IORESOURCE_MEM, }, /* TX event */ { .start = DAVINCI_DA8XX_DMA_MCASP0_AXEVT, .end = DAVINCI_DA8XX_DMA_MCASP0_AXEVT, .flags = IORESOURCE_DMA, }, /* RX event */ { .start = DAVINCI_DA8XX_DMA_MCASP0_AREVT, .end = DAVINCI_DA8XX_DMA_MCASP0_AREVT, 
.flags = IORESOURCE_DMA, }, }; static struct platform_device da850_mcasp_device = { .name = "davinci-mcasp", .id = 0, .num_resources = ARRAY_SIZE(da850_mcasp_resources), .resource = da850_mcasp_resources, }; void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) { /* DA830/OMAP-L137 has 3 instances of McASP */ if (cpu_is_davinci_da830() && id == 1) { da830_mcasp1_device.dev.platform_data = pdata; platform_device_register(&da830_mcasp1_device); } else if (cpu_is_davinci_da850()) { da850_mcasp_device.dev.platform_data = pdata; platform_device_register(&da850_mcasp_device); } } static const struct display_panel disp_panel = { QVGA, 16, 16, COLOR_ACTIVE, }; static struct lcd_ctrl_config lcd_cfg = { &disp_panel, .ac_bias = 255, .ac_bias_intrpt = 0, .dma_burst_sz = 16, .bpp = 16, .fdd = 255, .tft_alt_mode = 0, .stn_565_mode = 0, .mono_8bit_mode = 0, .invert_line_clock = 1, .invert_frm_clock = 1, .sync_edge = 0, .sync_ctrl = 1, .raster_order = 0, }; struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = { .manu_name = "sharp", .controller_data = &lcd_cfg, .type = "Sharp_LCD035Q3DG01", }; struct da8xx_lcdc_platform_data sharp_lk043t1dg01_pdata = { .manu_name = "sharp", .controller_data = &lcd_cfg, .type = "Sharp_LK043T1DG01", }; static struct resource da8xx_lcdc_resources[] = { [0] = { /* registers */ .start = DA8XX_LCD_CNTRL_BASE, .end = DA8XX_LCD_CNTRL_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { /* interrupt */ .start = IRQ_DA8XX_LCDINT, .end = IRQ_DA8XX_LCDINT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_lcdc_device = { .name = "da8xx_lcdc", .id = 0, .num_resources = ARRAY_SIZE(da8xx_lcdc_resources), .resource = da8xx_lcdc_resources, }; int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata) { da8xx_lcdc_device.dev.platform_data = pdata; return platform_device_register(&da8xx_lcdc_device); } static struct resource da8xx_mmcsd0_resources[] = { { /* registers */ .start = DA8XX_MMCSD0_BASE, .end = 
DA8XX_MMCSD0_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { /* interrupt */ .start = IRQ_DA8XX_MMCSDINT0, .end = IRQ_DA8XX_MMCSDINT0, .flags = IORESOURCE_IRQ, }, { /* DMA RX */ .start = EDMA_CTLR_CHAN(0, 16), .end = EDMA_CTLR_CHAN(0, 16), .flags = IORESOURCE_DMA, }, { /* DMA TX */ .start = EDMA_CTLR_CHAN(0, 17), .end = EDMA_CTLR_CHAN(0, 17), .flags = IORESOURCE_DMA, }, }; static struct platform_device da8xx_mmcsd0_device = { .name = "davinci_mmc", .id = 0, .num_resources = ARRAY_SIZE(da8xx_mmcsd0_resources), .resource = da8xx_mmcsd0_resources, }; int __init da8xx_register_mmcsd0(struct davinci_mmc_config *config) { da8xx_mmcsd0_device.dev.platform_data = config; return platform_device_register(&da8xx_mmcsd0_device); } #ifdef CONFIG_ARCH_DAVINCI_DA850 static struct resource da850_mmcsd1_resources[] = { { /* registers */ .start = DA850_MMCSD1_BASE, .end = DA850_MMCSD1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { /* interrupt */ .start = IRQ_DA850_MMCSDINT0_1, .end = IRQ_DA850_MMCSDINT0_1, .flags = IORESOURCE_IRQ, }, { /* DMA RX */ .start = EDMA_CTLR_CHAN(1, 28), .end = EDMA_CTLR_CHAN(1, 28), .flags = IORESOURCE_DMA, }, { /* DMA TX */ .start = EDMA_CTLR_CHAN(1, 29), .end = EDMA_CTLR_CHAN(1, 29), .flags = IORESOURCE_DMA, }, }; static struct platform_device da850_mmcsd1_device = { .name = "davinci_mmc", .id = 1, .num_resources = ARRAY_SIZE(da850_mmcsd1_resources), .resource = da850_mmcsd1_resources, }; int __init da850_register_mmcsd1(struct davinci_mmc_config *config) { da850_mmcsd1_device.dev.platform_data = config; return platform_device_register(&da850_mmcsd1_device); } #endif static struct resource da8xx_rtc_resources[] = { { .start = DA8XX_RTC_BASE, .end = DA8XX_RTC_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { /* timer irq */ .start = IRQ_DA8XX_RTC, .end = IRQ_DA8XX_RTC, .flags = IORESOURCE_IRQ, }, { /* alarm irq */ .start = IRQ_DA8XX_RTC, .end = IRQ_DA8XX_RTC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da8xx_rtc_device = { .name = 
"omap_rtc", .id = -1, .num_resources = ARRAY_SIZE(da8xx_rtc_resources), .resource = da8xx_rtc_resources, }; int da8xx_register_rtc(void) { int ret; void __iomem *base; base = ioremap(DA8XX_RTC_BASE, SZ_4K); if (WARN_ON(!base)) return -ENOMEM; /* Unlock the rtc's registers */ __raw_writel(0x83e70b13, base + 0x6c); __raw_writel(0x95a4f1e0, base + 0x70); iounmap(base); ret = platform_device_register(&da8xx_rtc_device); if (!ret) /* Atleast on DA850, RTC is a wakeup source */ device_init_wakeup(&da8xx_rtc_device.dev, true); return ret; } static void __iomem *da8xx_ddr2_ctlr_base; void __iomem * __init da8xx_get_mem_ctlr(void) { if (da8xx_ddr2_ctlr_base) return da8xx_ddr2_ctlr_base; da8xx_ddr2_ctlr_base = ioremap(DA8XX_DDR2_CTL_BASE, SZ_32K); if (!da8xx_ddr2_ctlr_base) pr_warning("%s: Unable to map DDR2 controller", __func__); return da8xx_ddr2_ctlr_base; } static struct resource da8xx_cpuidle_resources[] = { { .start = DA8XX_DDR2_CTL_BASE, .end = DA8XX_DDR2_CTL_BASE + SZ_32K - 1, .flags = IORESOURCE_MEM, }, }; /* DA8XX devices support DDR2 power down */ static struct davinci_cpuidle_config da8xx_cpuidle_pdata = { .ddr2_pdown = 1, }; static struct platform_device da8xx_cpuidle_device = { .name = "cpuidle-davinci", .num_resources = ARRAY_SIZE(da8xx_cpuidle_resources), .resource = da8xx_cpuidle_resources, .dev = { .platform_data = &da8xx_cpuidle_pdata, }, }; int __init da8xx_register_cpuidle(void) { da8xx_cpuidle_pdata.ddr2_ctlr_base = da8xx_get_mem_ctlr(); return platform_device_register(&da8xx_cpuidle_device); }
gpl-2.0
cjdoucette/XIA-for-Linux
drivers/net/can/slcan.c
312
20048
/* * slcan.c - serial line CAN interface driver (using tty line discipline) * * This file is derived from linux/drivers/net/slip/slip.c * * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk> * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org> * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see http://www.gnu.org/licenses/gpl.html * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
* */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/skb.h> MODULE_ALIAS_LDISC(N_SLCAN); MODULE_DESCRIPTION("serial line CAN interface"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>"); #define SLCAN_MAGIC 0x53CA static int maxdev = 10; /* MAX number of SLCAN channels; This can be overridden with insmod slcan.ko maxdev=nnn */ module_param(maxdev, int, 0); MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces"); /* maximum rx buffer len: extended CAN frame with timestamp */ #define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) #define SLC_CMD_LEN 1 #define SLC_SFF_ID_LEN 3 #define SLC_EFF_ID_LEN 8 struct slcan { int magic; /* Various fields. */ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ spinlock_t lock; struct work_struct tx_work; /* Flushes transmit buffer */ /* These are pointers to the malloc()ed frame buffers. */ unsigned char rbuff[SLC_MTU]; /* receiver buffer */ int rcount; /* received chars counter */ unsigned char xbuff[SLC_MTU]; /* transmitter buffer */ unsigned char *xhead; /* pointer to next XMIT byte */ int xleft; /* bytes left in XMIT queue */ unsigned long flags; /* Flag values/ mode etc */ #define SLF_INUSE 0 /* Channel in use */ #define SLF_ERROR 1 /* Parity, etc. 
error */ }; static struct net_device **slcan_devs; /************************************************************************ * SLCAN ENCAPSULATION FORMAT * ************************************************************************/ /* * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended * frame format) a data length code (can_dlc) which can be from 0 to 8 * and up to <can_dlc> data bytes as payload. * Additionally a CAN frame may become a remote transmission frame if the * RTR-bit is set. This causes another ECU to send a CAN frame with the * given can_id. * * The SLCAN ASCII representation of these different frame types is: * <type> <id> <dlc> <data>* * * Extended frames (29 bit) are defined by capital characters in the type. * RTR frames are defined as 'r' types - normal frames have 't' type: * t => 11 bit data frame * r => 11 bit RTR frame * T => 29 bit data frame * R => 29 bit RTR frame * * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64). * The <dlc> is a one byte ASCII number ('0' - '8') * The <data> section has at much ASCII Hex bytes as defined by the <dlc> * * Examples: * * t1230 : can_id 0x123, can_dlc 0, no data * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33 * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55 * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request * */ /************************************************************************ * STANDARD SLCAN DECAPSULATION * ************************************************************************/ /* Send one completely decapsulated can_frame to the network layer */ static void slc_bump(struct slcan *sl) { struct sk_buff *skb; struct can_frame cf; int i, tmp; u32 tmpid; char *cmd = sl->rbuff; cf.can_id = 0; switch (*cmd) { case 'r': cf.can_id = CAN_RTR_FLAG; /* fallthrough */ case 't': /* store dlc ASCII value and terminate SFF CAN ID string */ cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; 
sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1; break; case 'R': cf.can_id = CAN_RTR_FLAG; /* fallthrough */ case 'T': cf.can_id |= CAN_EFF_FLAG; /* store dlc ASCII value and terminate EFF CAN ID string */ cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN]; sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1; break; default: return; } if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid)) return; cf.can_id |= tmpid; /* get can_dlc from sanitized ASCII value */ if (cf.can_dlc >= '0' && cf.can_dlc < '9') cf.can_dlc -= '0'; else return; *(u64 *) (&cf.data) = 0; /* clear payload */ /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf.can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf.can_dlc; i++) { tmp = hex_to_bin(*cmd++); if (tmp < 0) return; cf.data[i] = (tmp << 4); tmp = hex_to_bin(*cmd++); if (tmp < 0) return; cf.data[i] |= tmp; } } skb = dev_alloc_skb(sizeof(struct can_frame) + sizeof(struct can_skb_priv)); if (!skb) return; skb->dev = sl->dev; skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; can_skb_reserve(skb); can_skb_prv(skb)->ifindex = sl->dev->ifindex; memcpy(skb_put(skb, sizeof(struct can_frame)), &cf, sizeof(struct can_frame)); netif_rx_ni(skb); sl->dev->stats.rx_packets++; sl->dev->stats.rx_bytes += cf.can_dlc; } /* parse tty input stream */ static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 4)) { slc_bump(sl); } sl->rcount = 0; } else { if (!test_bit(SLF_ERROR, &sl->flags)) { if (sl->rcount < SLC_MTU) { sl->rbuff[sl->rcount++] = s; return; } else { sl->dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } } } 
/************************************************************************ * STANDARD SLCAN ENCAPSULATION * ************************************************************************/ /* Encapsulate one can_frame and stuff into a TTY queue. */ static void slc_encaps(struct slcan *sl, struct can_frame *cf) { int actual, i; unsigned char *pos; unsigned char *endpos; canid_t id = cf->can_id; pos = sl->xbuff; if (cf->can_id & CAN_RTR_FLAG) *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */ else *pos = 'T'; /* becomes 't' in standard frame format (SSF) */ /* determine number of chars for the CAN-identifier */ if (cf->can_id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; endpos = pos + SLC_EFF_ID_LEN; } else { *pos |= 0x20; /* convert R/T to lower case for SFF */ id &= CAN_SFF_MASK; endpos = pos + SLC_SFF_ID_LEN; } /* build 3 (SFF) or 8 (EFF) digit CAN identifier */ pos++; while (endpos >= pos) { *endpos-- = hex_asc_upper[id & 0xf]; id >>= 4; } pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN; *pos++ = cf->can_dlc + '0'; /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf->can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf->can_dlc; i++) pos = hex_byte_pack_upper(pos, cf->data[i]); } *pos++ = '\r'; /* Order of next two lines is *very* important. * When we are sending a little amount of data, * the transfer may be completed inside the ops->write() * routine, because it's running with interrupts enabled. * In this case we *never* got WRITE_WAKEUP event, * if we did not request it before write operation. * 14 Oct 1994 Dmitry Gorodchanin. */ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); sl->xleft = (pos - sl->xbuff) - actual; sl->xhead = sl->xbuff + actual; sl->dev->stats.tx_bytes += cf->can_dlc; } /* Write out any remaining transmit buffer. 
Scheduled when tty is writable */ static void slcan_transmit(struct work_struct *work) { struct slcan *sl = container_of(work, struct slcan, tx_work); int actual; spin_lock_bh(&sl->lock); /* First make sure we're connected. */ if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) { spin_unlock_bh(&sl->lock); return; } if (sl->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); spin_unlock_bh(&sl->lock); netif_wake_queue(sl->dev); return; } actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; spin_unlock_bh(&sl->lock); } /* * Called by the driver when there's room for more data. * Schedule the transmit. */ static void slcan_write_wakeup(struct tty_struct *tty) { struct slcan *sl = tty->disc_data; schedule_work(&sl->tx_work); } /* Send a can_frame to a TTY queue. */ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev) { struct slcan *sl = netdev_priv(dev); if (skb->len != sizeof(struct can_frame)) goto out; spin_lock(&sl->lock); if (!netif_running(dev)) { spin_unlock(&sl->lock); printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name); goto out; } if (sl->tty == NULL) { spin_unlock(&sl->lock); goto out; } netif_stop_queue(sl->dev); slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */ spin_unlock(&sl->lock); out: kfree_skb(skb); return NETDEV_TX_OK; } /****************************************** * Routines looking at netdevice side. ******************************************/ /* Netdevice UP -> DOWN routine */ static int slc_close(struct net_device *dev) { struct slcan *sl = netdev_priv(dev); spin_lock_bh(&sl->lock); if (sl->tty) { /* TTY discipline is running. 
*/ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); } netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; spin_unlock_bh(&sl->lock); return 0; } /* Netdevice DOWN -> UP routine */ static int slc_open(struct net_device *dev) { struct slcan *sl = netdev_priv(dev); if (sl->tty == NULL) return -ENODEV; sl->flags &= (1 << SLF_INUSE); netif_start_queue(dev); return 0; } /* Hook the destructor so we can free slcan devs at the right point in time */ static void slc_free_netdev(struct net_device *dev) { int i = dev->base_addr; free_netdev(dev); slcan_devs[i] = NULL; } static int slcan_change_mtu(struct net_device *dev, int new_mtu) { return -EINVAL; } static const struct net_device_ops slc_netdev_ops = { .ndo_open = slc_open, .ndo_stop = slc_close, .ndo_start_xmit = slc_xmit, .ndo_change_mtu = slcan_change_mtu, }; static void slc_setup(struct net_device *dev) { dev->netdev_ops = &slc_netdev_ops; dev->destructor = slc_free_netdev; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 10; dev->mtu = sizeof(struct can_frame); dev->type = ARPHRD_CAN; /* New-style flags. */ dev->flags = IFF_NOARP; dev->features = NETIF_F_HW_CSUM; } /****************************************** Routines looking at TTY side. ******************************************/ /* * Handle the 'receiver data ready' interrupt. * This function is called by the 'tty_io' module in the kernel when * a block of SLCAN data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. 
This will not * be re-entered while running but other ldisc functions may be called * in parallel */ static void slcan_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct slcan *sl = (struct slcan *) tty->disc_data; if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) return; /* Read the characters out of the buffer */ while (count--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; cp++; continue; } slcan_unesc(sl, *cp++); } } /************************************ * slcan_open helper routines. ************************************/ /* Collect hanged up channels */ static void slc_sync(void) { int i; struct net_device *dev; struct slcan *sl; for (i = 0; i < maxdev; i++) { dev = slcan_devs[i]; if (dev == NULL) break; sl = netdev_priv(dev); if (sl->tty) continue; if (dev->flags & IFF_UP) dev_close(dev); } } /* Find a free SLCAN channel, and link in this `tty' line. */ static struct slcan *slc_alloc(dev_t line) { int i; char name[IFNAMSIZ]; struct net_device *dev = NULL; struct slcan *sl; for (i = 0; i < maxdev; i++) { dev = slcan_devs[i]; if (dev == NULL) break; } /* Sorry, too many, all slots in use */ if (i >= maxdev) return NULL; sprintf(name, "slcan%d", i); dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup); if (!dev) return NULL; dev->base_addr = i; sl = netdev_priv(dev); /* Initialize channel control data */ sl->magic = SLCAN_MAGIC; sl->dev = dev; spin_lock_init(&sl->lock); INIT_WORK(&sl->tx_work, slcan_transmit); slcan_devs[i] = dev; return sl; } /* * Open the high-level part of the SLCAN channel. * This function is called by the TTY module when the * SLCAN line discipline is called for. Because we are * sure the tty line exists, we only have to link it to * a free SLCAN channel... * * Called in process context serialized from other ldisc calls. 
*/ static int slcan_open(struct tty_struct *tty) { struct slcan *sl; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EOPNOTSUPP; /* RTnetlink lock is misused here to serialize concurrent opens of slcan channels. There are better ways, but it is the simplest one. */ rtnl_lock(); /* Collect hanged up channels. */ slc_sync(); sl = tty->disc_data; err = -EEXIST; /* First make sure we're not already connected. */ if (sl && sl->magic == SLCAN_MAGIC) goto err_exit; /* OK. Find a free SLCAN channel to use. */ err = -ENFILE; sl = slc_alloc(tty_devnum(tty)); if (sl == NULL) goto err_exit; sl->tty = tty; tty->disc_data = sl; if (!test_bit(SLF_INUSE, &sl->flags)) { /* Perform the low-level SLCAN initialization. */ sl->rcount = 0; sl->xleft = 0; set_bit(SLF_INUSE, &sl->flags); err = register_netdevice(sl->dev); if (err) goto err_free_chan; } /* Done. We have linked the TTY line to a channel. */ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ /* TTY layer expects 0 on success */ return 0; err_free_chan: sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); err_exit: rtnl_unlock(); /* Count references from TTY module */ return err; } /* * Close down a SLCAN channel. * This means flushing out any pending queues, and then returning. This * call is serialized against other ldisc functions. * * We also use this method for a hangup event. */ static void slcan_close(struct tty_struct *tty) { struct slcan *sl = (struct slcan *) tty->disc_data; /* First make sure we're connected. */ if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty) return; spin_lock_bh(&sl->lock); tty->disc_data = NULL; sl->tty = NULL; spin_unlock_bh(&sl->lock); flush_work(&sl->tx_work); /* Flush network side */ unregister_netdev(sl->dev); /* This will complete via sl_free_netdev */ } static int slcan_hangup(struct tty_struct *tty) { slcan_close(tty); return 0; } /* Perform I/O control on an active SLCAN channel. 
*/ static int slcan_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct slcan *sl = (struct slcan *) tty->disc_data; unsigned int tmp; /* First make sure we're connected. */ if (!sl || sl->magic != SLCAN_MAGIC) return -EINVAL; switch (cmd) { case SIOCGIFNAME: tmp = strlen(sl->dev->name) + 1; if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) return -EFAULT; return 0; case SIOCSIFHWADDR: return -EINVAL; default: return tty_mode_ioctl(tty, file, cmd, arg); } } static struct tty_ldisc_ops slc_ldisc = { .owner = THIS_MODULE, .magic = TTY_LDISC_MAGIC, .name = "slcan", .open = slcan_open, .close = slcan_close, .hangup = slcan_hangup, .ioctl = slcan_ioctl, .receive_buf = slcan_receive_buf, .write_wakeup = slcan_write_wakeup, }; static int __init slcan_init(void) { int status; if (maxdev < 4) maxdev = 4; /* Sanity */ pr_info("slcan: serial line CAN interface driver\n"); pr_info("slcan: %d dynamic interface channels.\n", maxdev); slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL); if (!slcan_devs) return -ENOMEM; /* Fill in our line protocol discipline, and register it */ status = tty_register_ldisc(N_SLCAN, &slc_ldisc); if (status) { printk(KERN_ERR "slcan: can't register line discipline\n"); kfree(slcan_devs); } return status; } static void __exit slcan_exit(void) { int i; struct net_device *dev; struct slcan *sl; unsigned long timeout = jiffies + HZ; int busy = 0; if (slcan_devs == NULL) return; /* First of all: check for active disciplines and hangup them. 
*/ do { if (busy) msleep_interruptible(100); busy = 0; for (i = 0; i < maxdev; i++) { dev = slcan_devs[i]; if (!dev) continue; sl = netdev_priv(dev); spin_lock_bh(&sl->lock); if (sl->tty) { busy++; tty_hangup(sl->tty); } spin_unlock_bh(&sl->lock); } } while (busy && time_before(jiffies, timeout)); /* FIXME: hangup is async so we should wait when doing this second phase */ for (i = 0; i < maxdev; i++) { dev = slcan_devs[i]; if (!dev) continue; slcan_devs[i] = NULL; sl = netdev_priv(dev); if (sl->tty) { printk(KERN_ERR "%s: tty discipline still running\n", dev->name); /* Intentionally leak the control block. */ dev->destructor = NULL; } unregister_netdev(dev); } kfree(slcan_devs); slcan_devs = NULL; i = tty_unregister_ldisc(N_SLCAN); if (i) printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i); } module_init(slcan_init); module_exit(slcan_exit);
gpl-2.0
djbw/linux
fs/adfs/dir_f.c
568
10608
/* * linux/fs/adfs/dir_f.c * * Copyright (C) 1997-1999 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * E and F format directory handling */ #include <linux/buffer_head.h> #include "adfs.h" #include "dir_f.h" static void adfs_f_free(struct adfs_dir *dir); /* * Read an (unaligned) value of length 1..4 bytes */ static inline unsigned int adfs_readval(unsigned char *p, int len) { unsigned int val = 0; switch (len) { case 4: val |= p[3] << 24; case 3: val |= p[2] << 16; case 2: val |= p[1] << 8; default: val |= p[0]; } return val; } static inline void adfs_writeval(unsigned char *p, int len, unsigned int val) { switch (len) { case 4: p[3] = val >> 24; case 3: p[2] = val >> 16; case 2: p[1] = val >> 8; default: p[0] = val; } } static inline int adfs_readname(char *buf, char *ptr, int maxlen) { char *old_buf = buf; while ((unsigned char)*ptr >= ' ' && maxlen--) { if (*ptr == '/') *buf++ = '.'; else *buf++ = *ptr; ptr++; } return buf - old_buf; } #define ror13(v) ((v >> 13) | (v << 19)) #define dir_u8(idx) \ ({ int _buf = idx >> blocksize_bits; \ int _off = idx - (_buf << blocksize_bits);\ *(u8 *)(bh[_buf]->b_data + _off); \ }) #define dir_u32(idx) \ ({ int _buf = idx >> blocksize_bits; \ int _off = idx - (_buf << blocksize_bits);\ *(__le32 *)(bh[_buf]->b_data + _off); \ }) #define bufoff(_bh,_idx) \ ({ int _buf = _idx >> blocksize_bits; \ int _off = _idx - (_buf << blocksize_bits);\ (u8 *)(_bh[_buf]->b_data + _off); \ }) /* * There are some algorithms that are nice in * assembler, but a bitch in C... This is one * of them. 
*/ static u8 adfs_dir_checkbyte(const struct adfs_dir *dir) { struct buffer_head * const *bh = dir->bh; const int blocksize_bits = dir->sb->s_blocksize_bits; union { __le32 *ptr32; u8 *ptr8; } ptr, end; u32 dircheck = 0; int last = 5 - 26; int i = 0; /* * Accumulate each word up to the last whole * word of the last directory entry. This * can spread across several buffer heads. */ do { last += 26; do { dircheck = le32_to_cpu(dir_u32(i)) ^ ror13(dircheck); i += sizeof(u32); } while (i < (last & ~3)); } while (dir_u8(last) != 0); /* * Accumulate the last few bytes. These * bytes will be within the same bh. */ if (i != last) { ptr.ptr8 = bufoff(bh, i); end.ptr8 = ptr.ptr8 + last - i; do { dircheck = *ptr.ptr8++ ^ ror13(dircheck); } while (ptr.ptr8 < end.ptr8); } /* * The directory tail is in the final bh * Note that contary to the RISC OS PRMs, * the first few bytes are NOT included * in the check. All bytes are in the * same bh. */ ptr.ptr8 = bufoff(bh, 2008); end.ptr8 = ptr.ptr8 + 36; do { __le32 v = *ptr.ptr32++; dircheck = le32_to_cpu(v) ^ ror13(dircheck); } while (ptr.ptr32 < end.ptr32); return (dircheck ^ (dircheck >> 8) ^ (dircheck >> 16) ^ (dircheck >> 24)) & 0xff; } /* * Read and check that a directory is valid */ static int adfs_dir_read(struct super_block *sb, unsigned long object_id, unsigned int size, struct adfs_dir *dir) { const unsigned int blocksize_bits = sb->s_blocksize_bits; int blk = 0; /* * Directories which are not a multiple of 2048 bytes * are considered bad v2 [3.6] */ if (size & 2047) goto bad_dir; size >>= blocksize_bits; dir->nr_buffers = 0; dir->sb = sb; for (blk = 0; blk < size; blk++) { int phys; phys = __adfs_block_map(sb, object_id, blk); if (!phys) { adfs_error(sb, "dir object %lX has a hole at offset %d", object_id, blk); goto release_buffers; } dir->bh[blk] = sb_bread(sb, phys); if (!dir->bh[blk]) goto release_buffers; } memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead)); memcpy(&dir->dirtail, bufoff(dir->bh, 2007), 
sizeof(dir->dirtail)); if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq || memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4)) goto bad_dir; if (memcmp(&dir->dirhead.startname, "Nick", 4) && memcmp(&dir->dirhead.startname, "Hugo", 4)) goto bad_dir; if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte) goto bad_dir; dir->nr_buffers = blk; return 0; bad_dir: adfs_error(sb, "corrupted directory fragment %lX", object_id); release_buffers: for (blk -= 1; blk >= 0; blk -= 1) brelse(dir->bh[blk]); dir->sb = NULL; return -EIO; } /* * convert a disk-based directory entry to a Linux ADFS directory entry */ static inline void adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj, struct adfs_direntry *de) { obj->name_len = adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN); obj->file_id = adfs_readval(de->dirinddiscadd, 3); obj->loadaddr = adfs_readval(de->dirload, 4); obj->execaddr = adfs_readval(de->direxec, 4); obj->size = adfs_readval(de->dirlen, 4); obj->attr = de->newdiratts; obj->filetype = -1; /* * object is a file and is filetyped and timestamped? * RISC OS 12-bit filetype is stored in load_address[19:8] */ if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) && (0xfff00000 == (0xfff00000 & obj->loadaddr))) { obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8); /* optionally append the ,xyz hex filetype suffix */ if (ADFS_SB(dir->sb)->s_ftsuffix) obj->name_len += append_filetype_suffix( &obj->name[obj->name_len], obj->filetype); } } /* * convert a Linux ADFS directory entry to a disk-based directory entry */ static inline void adfs_obj2dir(struct adfs_direntry *de, struct object_info *obj) { adfs_writeval(de->dirinddiscadd, 3, obj->file_id); adfs_writeval(de->dirload, 4, obj->loadaddr); adfs_writeval(de->direxec, 4, obj->execaddr); adfs_writeval(de->dirlen, 4, obj->size); de->newdiratts = obj->attr; } /* * get a directory entry. Note that the caller is responsible * for holding the relevant locks. 
*/ static int __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj) { struct super_block *sb = dir->sb; struct adfs_direntry de; int thissize, buffer, offset; buffer = pos >> sb->s_blocksize_bits; if (buffer > dir->nr_buffers) return -EINVAL; offset = pos & (sb->s_blocksize - 1); thissize = sb->s_blocksize - offset; if (thissize > 26) thissize = 26; memcpy(&de, dir->bh[buffer]->b_data + offset, thissize); if (thissize != 26) memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data, 26 - thissize); if (!de.dirobname[0]) return -ENOENT; adfs_dir2obj(dir, obj, &de); return 0; } static int __adfs_dir_put(struct adfs_dir *dir, int pos, struct object_info *obj) { struct super_block *sb = dir->sb; struct adfs_direntry de; int thissize, buffer, offset; buffer = pos >> sb->s_blocksize_bits; if (buffer > dir->nr_buffers) return -EINVAL; offset = pos & (sb->s_blocksize - 1); thissize = sb->s_blocksize - offset; if (thissize > 26) thissize = 26; /* * Get the entry in total */ memcpy(&de, dir->bh[buffer]->b_data + offset, thissize); if (thissize != 26) memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data, 26 - thissize); /* * update it */ adfs_obj2dir(&de, obj); /* * Put the new entry back */ memcpy(dir->bh[buffer]->b_data + offset, &de, thissize); if (thissize != 26) memcpy(dir->bh[buffer + 1]->b_data, ((char *)&de) + thissize, 26 - thissize); return 0; } /* * the caller is responsible for holding the necessary * locks. 
*/ static int adfs_dir_find_entry(struct adfs_dir *dir, unsigned long object_id) { int pos, ret; ret = -ENOENT; for (pos = 5; pos < ADFS_NUM_DIR_ENTRIES * 26 + 5; pos += 26) { struct object_info obj; if (!__adfs_dir_get(dir, pos, &obj)) break; if (obj.file_id == object_id) { ret = pos; break; } } return ret; } static int adfs_f_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir) { int ret; if (sz != ADFS_NEWDIR_SIZE) return -EIO; ret = adfs_dir_read(sb, id, sz, dir); if (ret) adfs_error(sb, "unable to read directory"); else dir->parent_id = adfs_readval(dir->dirtail.new.dirparent, 3); return ret; } static int adfs_f_setpos(struct adfs_dir *dir, unsigned int fpos) { if (fpos >= ADFS_NUM_DIR_ENTRIES) return -ENOENT; dir->pos = 5 + fpos * 26; return 0; } static int adfs_f_getnext(struct adfs_dir *dir, struct object_info *obj) { unsigned int ret; ret = __adfs_dir_get(dir, dir->pos, obj); if (ret == 0) dir->pos += 26; return ret; } static int adfs_f_update(struct adfs_dir *dir, struct object_info *obj) { struct super_block *sb = dir->sb; int ret, i; ret = adfs_dir_find_entry(dir, obj->file_id); if (ret < 0) { adfs_error(dir->sb, "unable to locate entry to update"); goto out; } __adfs_dir_put(dir, ret, obj); /* * Increment directory sequence number */ dir->bh[0]->b_data[0] += 1; dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 6] += 1; ret = adfs_dir_checkbyte(dir); /* * Update directory check byte */ dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 1] = ret; #if 1 { const unsigned int blocksize_bits = sb->s_blocksize_bits; memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead)); memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail)); if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq || memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4)) goto bad_dir; if (memcmp(&dir->dirhead.startname, "Nick", 4) && memcmp(&dir->dirhead.startname, "Hugo", 4)) goto bad_dir; if (adfs_dir_checkbyte(dir) != 
dir->dirtail.new.dircheckbyte) goto bad_dir; } #endif for (i = dir->nr_buffers - 1; i >= 0; i--) mark_buffer_dirty(dir->bh[i]); ret = 0; out: return ret; #if 1 bad_dir: adfs_error(dir->sb, "whoops! I broke a directory!"); return -EIO; #endif } static int adfs_f_sync(struct adfs_dir *dir) { int err = 0; int i; for (i = dir->nr_buffers - 1; i >= 0; i--) { struct buffer_head *bh = dir->bh[i]; sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) err = -EIO; } return err; } static void adfs_f_free(struct adfs_dir *dir) { int i; for (i = dir->nr_buffers - 1; i >= 0; i--) { brelse(dir->bh[i]); dir->bh[i] = NULL; } dir->nr_buffers = 0; dir->sb = NULL; } const struct adfs_dir_ops adfs_f_dir_ops = { .read = adfs_f_read, .setpos = adfs_f_setpos, .getnext = adfs_f_getnext, .update = adfs_f_update, .sync = adfs_f_sync, .free = adfs_f_free };
gpl-2.0
jayk/linux
fs/cifs/sess.c
824
39709
/* * fs/cifs/sess.c * * SMB/CIFS session setup handling routines * * Copyright (c) International Business Machines Corp., 2006, 2009 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "ntlmssp.h" #include "nterr.h" #include <linux/utsname.h> #include <linux/slab.h> #include "cifs_spnego.h" static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) { __u32 capabilities = 0; /* init fields common to all four types of SessSetup */ /* Note that offsets for first seven fields in req struct are same */ /* in CIFS Specs so does not matter which of 3 forms of struct */ /* that we use in next few lines */ /* Note that header is initialized to zero in header_assemble */ pSMB->req.AndXCommand = 0xFF; pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32, CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4, USHRT_MAX)); pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); pSMB->req.VcNumber = cpu_to_le16(1); /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ /* BB verify whether signing required on neg or just on auth frame (and NTLM case) */ capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | 
CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; if (ses->server->sign) pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_UNICODE) { pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE; capabilities |= CAP_UNICODE; } if (ses->capabilities & CAP_STATUS32) { pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS; capabilities |= CAP_STATUS32; } if (ses->capabilities & CAP_DFS) { pSMB->req.hdr.Flags2 |= SMBFLG2_DFS; capabilities |= CAP_DFS; } if (ses->capabilities & CAP_UNIX) capabilities |= CAP_UNIX; return capabilities; } static void unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; int bytes_ret = 0; /* Copy OS version */ bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32, nls_cp); bcc_ptr += 2 * bytes_ret; bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release, 32, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* trailing null */ bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS, 32, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* trailing null */ *pbcc_area = bcc_ptr; } static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; int bytes_ret = 0; /* copy domain */ if (ses->domainName == NULL) { /* Sending null domain better than using a bogus domain name (as we did briefly in 2.6.18) since server will use its default */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; bytes_ret = 0; } else bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN, nls_cp); bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null terminator */ *pbcc_area = bcc_ptr; } static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; int bytes_ret = 0; /* BB FIXME add check that strings total less than 335 or will need to send them as arrays */ /* unicode strings, must be word aligned before the call */ /* if ((long) 
bcc_ptr % 2) { *bcc_ptr = 0; bcc_ptr++; } */ /* copy user */ if (ses->user_name == NULL) { /* null user mount */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; } else { bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN, nls_cp); } bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null termination */ unicode_domain_string(&bcc_ptr, ses, nls_cp); unicode_oslm_strings(&bcc_ptr, nls_cp); *pbcc_area = bcc_ptr; } static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { char *bcc_ptr = *pbcc_area; /* copy user */ /* BB what about null user mounts - check that we do this BB */ /* copy user */ if (ses->user_name != NULL) { strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); } /* else null user mount */ *bcc_ptr = 0; bcc_ptr++; /* account for null termination */ /* copy domain */ if (ses->domainName != NULL) { strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); } /* else we will send a null domain name so the server will default to its own domain */ *bcc_ptr = 0; bcc_ptr++; /* BB check for overflow here */ strcpy(bcc_ptr, "Linux version "); bcc_ptr += strlen("Linux version "); strcpy(bcc_ptr, init_utsname()->release); bcc_ptr += strlen(init_utsname()->release) + 1; strcpy(bcc_ptr, CIFS_NETWORK_OPSYS); bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1; *pbcc_area = bcc_ptr; } static void decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses, const struct nls_table *nls_cp) { int len; char *data = *pbcc_area; cifs_dbg(FYI, "bleft %d\n", bleft); kfree(ses->serverOS); ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp); cifs_dbg(FYI, "serverOS=%s\n", ses->serverOS); len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; data += len; bleft -= len; if (bleft <= 0) return; kfree(ses->serverNOS); ses->serverNOS = cifs_strndup_from_utf16(data, bleft, 
true, nls_cp); cifs_dbg(FYI, "serverNOS=%s\n", ses->serverNOS); len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2; data += len; bleft -= len; if (bleft <= 0) return; kfree(ses->serverDomain); ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp); cifs_dbg(FYI, "serverDomain=%s\n", ses->serverDomain); return; } static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, struct cifs_ses *ses, const struct nls_table *nls_cp) { int len; char *bcc_ptr = *pbcc_area; cifs_dbg(FYI, "decode sessetup ascii. bleft %d\n", bleft); len = strnlen(bcc_ptr, bleft); if (len >= bleft) return; kfree(ses->serverOS); ses->serverOS = kzalloc(len + 1, GFP_KERNEL); if (ses->serverOS) { strncpy(ses->serverOS, bcc_ptr, len); if (strncmp(ses->serverOS, "OS/2", 4) == 0) cifs_dbg(FYI, "OS/2 server\n"); } bcc_ptr += len + 1; bleft -= len + 1; len = strnlen(bcc_ptr, bleft); if (len >= bleft) return; kfree(ses->serverNOS); ses->serverNOS = kzalloc(len + 1, GFP_KERNEL); if (ses->serverNOS) strncpy(ses->serverNOS, bcc_ptr, len); bcc_ptr += len + 1; bleft -= len + 1; len = strnlen(bcc_ptr, bleft); if (len > bleft) return; /* No domain field in LANMAN case. 
Domain is returned by old servers in the SMB negprot response */ /* BB For newer servers which do not support Unicode, but thus do return domain here we could add parsing for it later, but it is not very important */ cifs_dbg(FYI, "ascii: bytes left %d\n", bleft); } int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses) { unsigned int tioffset; /* challenge message target info area */ unsigned int tilen; /* challenge message target info area length */ CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; if (blob_len < sizeof(CHALLENGE_MESSAGE)) { cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len); return -EINVAL; } if (memcmp(pblob->Signature, "NTLMSSP", 8)) { cifs_dbg(VFS, "blob signature incorrect %s\n", pblob->Signature); return -EINVAL; } if (pblob->MessageType != NtLmChallenge) { cifs_dbg(VFS, "Incorrect message type %d\n", pblob->MessageType); return -EINVAL; } memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE); /* BB we could decode pblob->NegotiateFlags; some may be useful */ /* In particular we can examine sign flags */ /* BB spec says that if AvId field of MsvAvTimestamp is populated then we must set the MIC field of the AUTHENTICATE_MESSAGE */ ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); tilen = le16_to_cpu(pblob->TargetInfoArray.Length); if (tioffset > blob_len || tioffset + tilen > blob_len) { cifs_dbg(VFS, "tioffset + tilen too high %u + %u", tioffset, tilen); return -EINVAL; } if (tilen) { ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Challenge target info alloc failure"); return -ENOMEM; } ses->auth_key.len = tilen; } return 0; } /* BB Move to ntlmssp.c eventually */ /* We do not malloc the blob, it is passed in pbuffer, because it is fixed size, and small, making this approach cleaner */ void build_ntlmssp_negotiate_blob(unsigned char 
*pbuffer, struct cifs_ses *ses) { NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer; __u32 flags; memset(pbuffer, 0, sizeof(NEGOTIATE_MESSAGE)); memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmNegotiate; /* BB is NTLMV2 session security format easier to use here? */ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; if (ses->server->sign) { flags |= NTLMSSP_NEGOTIATE_SIGN; if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; } sec_blob->NegotiateFlags = cpu_to_le32(flags); sec_blob->WorkstationName.BufferOffset = 0; sec_blob->WorkstationName.Length = 0; sec_blob->WorkstationName.MaximumLength = 0; /* Domain name is sent on the Challenge not Negotiate NTLMSSP request */ sec_blob->DomainName.BufferOffset = 0; sec_blob->DomainName.Length = 0; sec_blob->DomainName.MaximumLength = 0; } /* We do not malloc the blob, it is passed in pbuffer, because its maximum possible size is fixed and small, making this approach cleaner. 
This function returns the length of the data in the blob */ int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen, struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc; AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; __u32 flags; unsigned char *tmp; memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmAuthenticate; flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; if (ses->server->sign) { flags |= NTLMSSP_NEGOTIATE_SIGN; if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; } tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); sec_blob->LmChallengeResponse.BufferOffset = cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE)); sec_blob->LmChallengeResponse.Length = 0; sec_blob->LmChallengeResponse.MaximumLength = 0; sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); rc = setup_ntlmv2_rsp(ses, nls_cp); if (rc) { cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc); goto setup_ntlmv2_ret; } memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE, ses->auth_key.len - CIFS_SESS_KEY_SIZE); tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE; sec_blob->NtChallengeResponse.Length = cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); sec_blob->NtChallengeResponse.MaximumLength = cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); if (ses->domainName == NULL) { sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->DomainName.Length = 0; sec_blob->DomainName.MaximumLength = 0; tmp += 2; } else { int len; len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName, CIFS_MAX_USERNAME_LEN, nls_cp); len *= 2; /* unicode is 2 bytes each */ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->DomainName.Length = cpu_to_le16(len); 
sec_blob->DomainName.MaximumLength = cpu_to_le16(len); tmp += len; } if (ses->user_name == NULL) { sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->UserName.Length = 0; sec_blob->UserName.MaximumLength = 0; tmp += 2; } else { int len; len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name, CIFS_MAX_USERNAME_LEN, nls_cp); len *= 2; /* unicode is 2 bytes each */ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->UserName.Length = cpu_to_le16(len); sec_blob->UserName.MaximumLength = cpu_to_le16(len); tmp += len; } sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->WorkstationName.Length = 0; sec_blob->WorkstationName.MaximumLength = 0; tmp += 2; if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) || (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) && !calc_seckey(ses)) { memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); sec_blob->SessionKey.MaximumLength = cpu_to_le16(CIFS_CPHTXT_SIZE); tmp += CIFS_CPHTXT_SIZE; } else { sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->SessionKey.Length = 0; sec_blob->SessionKey.MaximumLength = 0; } setup_ntlmv2_ret: *buflen = tmp - pbuffer; return rc; } enum securityEnum select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) { switch (server->negflavor) { case CIFS_NEGFLAVOR_EXTENDED: switch (requested) { case Kerberos: case RawNTLMSSP: return requested; case Unspecified: if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; /* Fallthrough */ default: return Unspecified; } case CIFS_NEGFLAVOR_UNENCAP: switch (requested) { case NTLM: case NTLMv2: return requested; case Unspecified: if (global_secflags & CIFSSEC_MAY_NTLMV2) 
return NTLMv2; if (global_secflags & CIFSSEC_MAY_NTLM) return NTLM; default: /* Fallthrough to attempt LANMAN authentication next */ break; } case CIFS_NEGFLAVOR_LANMAN: switch (requested) { case LANMAN: return requested; case Unspecified: if (global_secflags & CIFSSEC_MAY_LANMAN) return LANMAN; /* Fallthrough */ default: return Unspecified; } default: return Unspecified; } } struct sess_data { unsigned int xid; struct cifs_ses *ses; struct nls_table *nls_cp; void (*func)(struct sess_data *); int result; /* we will send the SMB in three pieces: * a fixed length beginning part, an optional * SPNEGO blob (which can be zero length), and a * last part which will include the strings * and rest of bcc area. This allows us to avoid * a large buffer 17K allocation */ int buf0_type; struct kvec iov[3]; }; static int sess_alloc_buffer(struct sess_data *sess_data, int wct) { int rc; struct cifs_ses *ses = sess_data->ses; struct smb_hdr *smb_buf; rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses, (void **)&smb_buf); if (rc) return rc; sess_data->iov[0].iov_base = (char *)smb_buf; sess_data->iov[0].iov_len = be32_to_cpu(smb_buf->smb_buf_length) + 4; /* * This variable will be used to clear the buffer * allocated above in case of any error in the calling function. */ sess_data->buf0_type = CIFS_SMALL_BUFFER; /* 2000 big enough to fit max user, domain, NOS name etc. 
*/ sess_data->iov[2].iov_base = kmalloc(2000, GFP_KERNEL); if (!sess_data->iov[2].iov_base) { rc = -ENOMEM; goto out_free_smb_buf; } return 0; out_free_smb_buf: kfree(smb_buf); sess_data->iov[0].iov_base = NULL; sess_data->iov[0].iov_len = 0; sess_data->buf0_type = CIFS_NO_BUFFER; return rc; } static void sess_free_buffer(struct sess_data *sess_data) { free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; kfree(sess_data->iov[2].iov_base); } static int sess_establish_session(struct sess_data *sess_data) { struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (!ses->server->session_estab) { if (ses->server->sign) { ses->server->session_key.response = kmemdup(ses->auth_key.response, ses->auth_key.len, GFP_KERNEL); if (!ses->server->session_key.response) { mutex_unlock(&ses->server->srv_mutex); return -ENOMEM; } ses->server->session_key.len = ses->auth_key.len; } ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "CIFS session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); return 0; } static int sess_sendreceive(struct sess_data *sess_data) { int rc; struct smb_hdr *smb_buf = (struct smb_hdr *) sess_data->iov[0].iov_base; __u16 count; count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; smb_buf->smb_buf_length = cpu_to_be32(be32_to_cpu(smb_buf->smb_buf_length) + count); put_bcc(count, smb_buf); rc = SendReceive2(sess_data->xid, sess_data->ses, sess_data->iov, 3 /* num_iovecs */, &sess_data->buf0_type, CIFS_LOG_ERROR); return rc; } /* * LANMAN and plaintext are less secure and off by default. * So we make this explicitly be turned on in kconfig (in the * build) and turned on at runtime (changed from the default) * in proc/fs/cifs or via mount parm. Unfortunately this is * needed for old Win (e.g. 
Win95), some obscure NAS and OS/2 */ #ifdef CONFIG_CIFS_WEAK_PW_HASH static void sess_auth_lanman(struct sess_data *sess_data) { int rc = 0; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; char *bcc_ptr; struct cifs_ses *ses = sess_data->ses; char lnm_session_key[CIFS_AUTH_RESP_SIZE]; __u32 capabilities; __u16 bytes_remaining; /* lanman 2 style sessionsetup */ /* wct = 10 */ rc = sess_alloc_buffer(sess_data, 10); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; bcc_ptr = sess_data->iov[2].iov_base; capabilities = cifs_ssetup_hdr(ses, pSMB); pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; /* no capabilities flags in old lanman negotiation */ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* Calculate hash with password and copy into bcc_ptr. * Encryption Key (stored as in cryptkey) gets used if the * security mode bit in Negottiate Protocol response states * to use challenge/response method (i.e. Password bit is 1). */ rc = calc_lanman_hash(ses->password, ses->server->cryptkey, ses->server->sec_mode & SECMODE_PW_ENCRYPT ? true : false, lnm_session_key); memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); bcc_ptr += CIFS_AUTH_RESP_SIZE; /* * can not sign if LANMAN negotiated so no need * to calculate signing key? but what if server * changed to do higher than lanman dialect and * we reconnected would we ever calc signing_key? 
*/ cifs_dbg(FYI, "Negotiating LANMAN setting up strings\n"); /* Unicode not allowed for LANMAN dialects */ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; rc = sess_sendreceive(sess_data); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; /* lanman response has a word count of 3 */ if (smb_buf->WordCount != 3) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } rc = sess_establish_session(sess_data); out: sess_data->result = rc; sess_data->func = NULL; sess_free_buffer(sess_data); } #endif static void sess_auth_ntlm(struct sess_data *sess_data) { int rc = 0; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; char *bcc_ptr; struct cifs_ses *ses = sess_data->ses; __u32 capabilities; __u16 bytes_remaining; /* old style NTLM sessionsetup */ /* wct = 13 */ rc = sess_alloc_buffer(sess_data, 13); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; bcc_ptr = sess_data->iov[2].iov_base; capabilities = cifs_ssetup_hdr(ses, pSMB); pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); 
pSMB->req_no_secext.CaseInsensitivePasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); pSMB->req_no_secext.CaseSensitivePasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* calculate ntlm response and session key */ rc = setup_ntlm_response(ses, sess_data->nls_cp); if (rc) { cifs_dbg(VFS, "Error %d during NTLM authentication\n", rc); goto out; } /* copy ntlm response */ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, CIFS_AUTH_RESP_SIZE); bcc_ptr += CIFS_AUTH_RESP_SIZE; memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, CIFS_AUTH_RESP_SIZE); bcc_ptr += CIFS_AUTH_RESP_SIZE; if (ses->capabilities & CAP_UNICODE) { /* unicode strings must be word aligned */ if (sess_data->iov[0].iov_len % 2) { *bcc_ptr = 0; bcc_ptr++; } unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } else { ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; rc = sess_sendreceive(sess_data); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 3) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? 
*/ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } rc = sess_establish_session(sess_data); out: sess_data->result = rc; sess_data->func = NULL; sess_free_buffer(sess_data); kfree(ses->auth_key.response); ses->auth_key.response = NULL; } static void sess_auth_ntlmv2(struct sess_data *sess_data) { int rc = 0; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; char *bcc_ptr; struct cifs_ses *ses = sess_data->ses; __u32 capabilities; __u16 bytes_remaining; /* old style NTLM sessionsetup */ /* wct = 13 */ rc = sess_alloc_buffer(sess_data, 13); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; bcc_ptr = sess_data->iov[2].iov_base; capabilities = cifs_ssetup_hdr(ses, pSMB); pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); /* LM2 password would be here if we supported it */ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; /* calculate nlmv2 response and session key */ rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp); if (rc) { cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc); goto out; } memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, ses->auth_key.len - CIFS_SESS_KEY_SIZE); bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; /* set case sensitive password length after tilen may get * assigned, tilen is 0 otherwise. 
*/ pSMB->req_no_secext.CaseSensitivePasswordLength = cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); if (ses->capabilities & CAP_UNICODE) { if (sess_data->iov[0].iov_len % 2) { *bcc_ptr = 0; bcc_ptr++; } unicode_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } else { ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; rc = sess_sendreceive(sess_data); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 3) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } rc = sess_establish_session(sess_data); out: sess_data->result = rc; sess_data->func = NULL; sess_free_buffer(sess_data); kfree(ses->auth_key.response); ses->auth_key.response = NULL; } #ifdef CONFIG_CIFS_UPCALL static void sess_auth_kerberos(struct sess_data *sess_data) { int rc = 0; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; char *bcc_ptr; struct cifs_ses *ses = sess_data->ses; __u32 capabilities; __u16 bytes_remaining; struct key *spnego_key = NULL; struct cifs_spnego_msg *msg; u16 blob_len; /* extended security */ /* wct = 12 */ rc = 
sess_alloc_buffer(sess_data, 12); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; bcc_ptr = sess_data->iov[2].iov_base; capabilities = cifs_ssetup_hdr(ses, pSMB); spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); spnego_key = NULL; goto out; } msg = spnego_key->payload.data; /* * check version field to make sure that cifs.upcall is * sending us a response in an expected form */ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) { cifs_dbg(VFS, "incorrect version of cifs.upcall (expected %d but got %d)", CIFS_SPNEGO_UPCALL_VERSION, msg->version); rc = -EKEYREJECTED; goto out_put_spnego_key; } ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, GFP_KERNEL); if (!ses->auth_key.response) { cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory", msg->sesskey_len); rc = -ENOMEM; goto out_put_spnego_key; } ses->auth_key.len = msg->sesskey_len; pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; capabilities |= CAP_EXTENDED_SECURITY; pSMB->req.Capabilities = cpu_to_le32(capabilities); sess_data->iov[1].iov_base = msg->data + msg->sesskey_len; sess_data->iov[1].iov_len = msg->secblob_len; pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len); if (ses->capabilities & CAP_UNICODE) { /* unicode strings must be word aligned */ if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) { *bcc_ptr = 0; bcc_ptr++; } unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp); } else { /* BB: is this right? 
*/ ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp); } sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; rc = sess_sendreceive(sess_data); if (rc) goto out_put_spnego_key; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 4) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out_put_spnego_key; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); if (blob_len > bytes_remaining) { cifs_dbg(VFS, "bad security blob length %d\n", blob_len); rc = -EINVAL; goto out_put_spnego_key; } bcc_ptr += blob_len; bytes_remaining -= blob_len; /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } rc = sess_establish_session(sess_data); out_put_spnego_key: key_invalidate(spnego_key); key_put(spnego_key); out: sess_data->result = rc; sess_data->func = NULL; sess_free_buffer(sess_data); kfree(ses->auth_key.response); ses->auth_key.response = NULL; } #endif /* ! CONFIG_CIFS_UPCALL */ /* * The required kvec buffers have to be allocated before calling this * function. 
*/ static int _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data) { struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; struct cifs_ses *ses = sess_data->ses; __u32 capabilities; char *bcc_ptr; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)pSMB; capabilities = cifs_ssetup_hdr(ses, pSMB); if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { cifs_dbg(VFS, "NTLMSSP requires Unicode support\n"); return -ENOSYS; } pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; capabilities |= CAP_EXTENDED_SECURITY; pSMB->req.Capabilities |= cpu_to_le32(capabilities); bcc_ptr = sess_data->iov[2].iov_base; /* unicode strings must be word aligned */ if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) { *bcc_ptr = 0; bcc_ptr++; } unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp); sess_data->iov[2].iov_len = (long) bcc_ptr - (long) sess_data->iov[2].iov_base; return 0; } static void sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data); static void sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data) { int rc; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; struct cifs_ses *ses = sess_data->ses; __u16 bytes_remaining; char *bcc_ptr; u16 blob_len; cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n"); /* * if memory allocation is successful, caller of this function * frees it. 
*/ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL); if (!ses->ntlmssp) { rc = -ENOMEM; goto out; } ses->ntlmssp->sesskey_per_smbsess = false; /* wct = 12 */ rc = sess_alloc_buffer(sess_data, 12); if (rc) goto out; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; /* Build security blob before we assemble the request */ build_ntlmssp_negotiate_blob(pSMB->req.SecurityBlob, ses); sess_data->iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); sess_data->iov[1].iov_base = pSMB->req.SecurityBlob; pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE)); rc = _sess_auth_rawntlmssp_assemble_req(sess_data); if (rc) goto out; rc = sess_sendreceive(sess_data); pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; /* If true, rc here is expected and not an error */ if (sess_data->buf0_type != CIFS_NO_BUFFER && smb_buf->Status.CifsError == cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED)) rc = 0; if (rc) goto out; cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); if (smb_buf->WordCount != 4) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out; } ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */ cifs_dbg(FYI, "UID = %llu\n", ses->Suid); bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); if (blob_len > bytes_remaining) { cifs_dbg(VFS, "bad security blob length %d\n", blob_len); rc = -EINVAL; goto out; } rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses); out: sess_free_buffer(sess_data); if (!rc) { sess_data->func = sess_auth_rawntlmssp_authenticate; return; } /* Else error. 
Cleanup */ kfree(ses->auth_key.response); ses->auth_key.response = NULL; kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->func = NULL; sess_data->result = rc; } static void sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data) { int rc; struct smb_hdr *smb_buf; SESSION_SETUP_ANDX *pSMB; struct cifs_ses *ses = sess_data->ses; __u16 bytes_remaining; char *bcc_ptr; char *ntlmsspblob = NULL; u16 blob_len; cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n"); /* wct = 12 */ rc = sess_alloc_buffer(sess_data, 12); if (rc) goto out; /* Build security blob before we assemble the request */ pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)pSMB; /* * 5 is an empirical value, large enough to hold * authenticate message plus max 10 of av paris, * domain, user, workstation names, flags, etc. */ ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE), GFP_KERNEL); if (!ntlmsspblob) { rc = -ENOMEM; goto out; } rc = build_ntlmssp_auth_blob(ntlmsspblob, &blob_len, ses, sess_data->nls_cp); if (rc) goto out_free_ntlmsspblob; sess_data->iov[1].iov_len = blob_len; sess_data->iov[1].iov_base = ntlmsspblob; pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len); /* * Make sure that we tell the server that we are using * the uid that it just gave us back on the response * (challenge) */ smb_buf->Uid = ses->Suid; rc = _sess_auth_rawntlmssp_assemble_req(sess_data); if (rc) goto out_free_ntlmsspblob; rc = sess_sendreceive(sess_data); if (rc) goto out_free_ntlmsspblob; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; smb_buf = (struct smb_hdr *)sess_data->iov[0].iov_base; if (smb_buf->WordCount != 4) { rc = -EIO; cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount); goto out_free_ntlmsspblob; } if (le16_to_cpu(pSMB->resp.Action) & GUEST_LOGIN) cifs_dbg(FYI, "Guest login\n"); /* BB mark SesInfo struct? */ if (ses->Suid != smb_buf->Uid) { ses->Suid = smb_buf->Uid; cifs_dbg(FYI, "UID changed! 
new UID = %llu\n", ses->Suid); } bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength); if (blob_len > bytes_remaining) { cifs_dbg(VFS, "bad security blob length %d\n", blob_len); rc = -EINVAL; goto out_free_ntlmsspblob; } bcc_ptr += blob_len; bytes_remaining -= blob_len; /* BB check if Unicode and decode strings */ if (bytes_remaining == 0) { /* no string area to decode, do nothing */ } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; --bytes_remaining; } decode_unicode_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } else { decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses, sess_data->nls_cp); } out_free_ntlmsspblob: kfree(ntlmsspblob); out: sess_free_buffer(sess_data); if (!rc) rc = sess_establish_session(sess_data); /* Cleanup */ kfree(ses->auth_key.response); ses->auth_key.response = NULL; kfree(ses->ntlmssp); ses->ntlmssp = NULL; sess_data->func = NULL; sess_data->result = rc; } static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data) { int type; type = select_sectype(ses->server, ses->sectype); cifs_dbg(FYI, "sess setup type %d\n", type); if (type == Unspecified) { cifs_dbg(VFS, "Unable to select appropriate authentication method!"); return -EINVAL; } switch (type) { case LANMAN: /* LANMAN and plaintext are less secure and off by default. * So we make this explicitly be turned on in kconfig (in the * build) and turned on at runtime (changed from the default) * in proc/fs/cifs or via mount parm. Unfortunately this is * needed for old Win (e.g. 
Win95), some obscure NAS and OS/2 */ #ifdef CONFIG_CIFS_WEAK_PW_HASH sess_data->func = sess_auth_lanman; break; #else return -EOPNOTSUPP; #endif case NTLM: sess_data->func = sess_auth_ntlm; break; case NTLMv2: sess_data->func = sess_auth_ntlmv2; break; case Kerberos: #ifdef CONFIG_CIFS_UPCALL sess_data->func = sess_auth_kerberos; break; #else cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); return -ENOSYS; break; #endif /* CONFIG_CIFS_UPCALL */ case RawNTLMSSP: sess_data->func = sess_auth_rawntlmssp_negotiate; break; default: cifs_dbg(VFS, "secType %d not supported!\n", type); return -ENOSYS; } return 0; } int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_cp) { int rc = 0; struct sess_data *sess_data; if (ses == NULL) { WARN(1, "%s: ses == NULL!", __func__); return -EINVAL; } sess_data = kzalloc(sizeof(struct sess_data), GFP_KERNEL); if (!sess_data) return -ENOMEM; rc = select_sec(ses, sess_data); if (rc) goto out; sess_data->xid = xid; sess_data->ses = ses; sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->nls_cp = (struct nls_table *) nls_cp; while (sess_data->func) sess_data->func(sess_data); /* Store result before we free sess_data */ rc = sess_data->result; out: kfree(sess_data); return rc; }
gpl-2.0
codefarmer-cyk/linux
drivers/gpu/drm/msm/msm_perf.c
1592
5925
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* For profiling, userspace can:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/gpu
 *
 * This will enable performance counters/profiling to track the busy time
 * and any gpu specific performance counters that are supported.
 */

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include "msm_drv.h"
#include "msm_gpu.h"

/* Per-device state behind the debugfs "perf" file. */
struct msm_perf_state {
	struct drm_device *dev;

	bool open;		/* only one reader at a time */
	int cnt;		/* samples emitted; header reprinted every 32 */
	struct mutex read_lock;

	char buf[256];		/* formatted output staged for read() */
	int buftot, bufpos;	/* valid length / read cursor into buf */

	unsigned long next_jiffies;

	struct dentry *ent;
	struct drm_info_node *node;
};

#define SAMPLE_TIME (HZ/4)

/* wait for next sample time: */
static int wait_sample(struct msm_perf_state *perf)
{
	unsigned long start_jiffies = jiffies;

	if (time_after(perf->next_jiffies, start_jiffies)) {
		unsigned long remaining_jiffies =
			perf->next_jiffies - start_jiffies;
		int ret = schedule_timeout_interruptible(remaining_jiffies);
		if (ret > 0) {
			/* interrupted */
			return -ERESTARTSYS;
		}
	}
	perf->next_jiffies += SAMPLE_TIME;
	return 0;
}

/*
 * Refill perf->buf with the next line of output: a header line every 32
 * calls, otherwise one sample line (%BUSY plus each counter, scaled to
 * MHz). Blocks until the next sample time. Returns 0 or negative errno.
 */
static int refill_buf(struct msm_perf_state *perf)
{
	struct msm_drm_private *priv = perf->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	char *ptr = perf->buf;
	int rem = sizeof(perf->buf);
	int i, n;

	if ((perf->cnt++ % 32) == 0) {
		/* Header line: */
		n = snprintf(ptr, rem, "%%BUSY");
		ptr += n;
		rem -= n;

		for (i = 0; i < gpu->num_perfcntrs; i++) {
			const struct msm_gpu_perfcntr *perfcntr =
				&gpu->perfcntrs[i];
			n = snprintf(ptr, rem, "\t%s", perfcntr->name);
			ptr += n;
			rem -= n;
		}
	} else {
		/* Sample line: */
		uint32_t activetime = 0, totaltime = 0;
		uint32_t cntrs[5];
		uint32_t val;
		int ret;

		/* sleep until next sample time: */
		ret = wait_sample(perf);
		if (ret)
			return ret;

		ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
				ARRAY_SIZE(cntrs), cntrs);
		if (ret < 0)
			return ret;

		/* busy permille -> printed as percent with one decimal */
		val = totaltime ? 1000 * activetime / totaltime : 0;
		n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
		ptr += n;
		rem -= n;

		for (i = 0; i < ret; i++) {
			/* cycle counters (I think).. convert to MHz.. */
			val = cntrs[i] / 10000;
			n = snprintf(ptr, rem, "\t%5d.%02d",
					val / 100, val % 100);
			ptr += n;
			rem -= n;
		}
	}

	n = snprintf(ptr, rem, "\n");
	ptr += n;
	rem -= n;

	perf->bufpos = 0;
	perf->buftot = ptr - perf->buf;

	return 0;
}

static ssize_t perf_read(struct file *file, char __user *buf,
		size_t sz, loff_t *ppos)
{
	struct msm_perf_state *perf = file->private_data;
	int n = 0, ret = 0;

	mutex_lock(&perf->read_lock);

	if (perf->bufpos >= perf->buftot) {
		ret = refill_buf(perf);
		if (ret)
			goto out;
	}

	n = min((int)sz, perf->buftot - perf->bufpos);
	/*
	 * copy_to_user() returns the number of bytes NOT copied; that
	 * count must never be returned as the read() result. Convert any
	 * partial copy into -EFAULT (previously the raw leftover count
	 * was returned to userspace as a "bytes read" value).
	 */
	if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
		ret = -EFAULT;
		goto out;
	}

	perf->bufpos += n;
	*ppos += n;

out:
	mutex_unlock(&perf->read_lock);
	if (ret)
		return ret;
	return n;
}

/* Single-open: starts the GPU perf counters for the sampling session. */
static int perf_open(struct inode *inode, struct file *file)
{
	struct msm_perf_state *perf = inode->i_private;
	struct drm_device *dev = perf->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (perf->open || !gpu) {
		ret = -EBUSY;
		goto out;
	}

	file->private_data = perf;
	perf->open = true;
	perf->cnt = 0;
	perf->buftot = 0;
	perf->bufpos = 0;
	msm_gpu_perfcntr_start(gpu);
	perf->next_jiffies = jiffies + SAMPLE_TIME;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int perf_release(struct inode *inode, struct file *file)
{
	struct msm_perf_state *perf = inode->i_private;
	struct msm_drm_private *priv = perf->dev->dev_private;
	msm_gpu_perfcntr_stop(priv->gpu);
	perf->open = false;
	return 0;
}

static const struct file_operations perf_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = perf_open,
	.read = perf_read,
	.llseek = no_llseek,
	.release = perf_release,
};

/*
 * Create /sys/kernel/debug/dri/<minor>/perf (first minor only).
 * Returns 0 on success, negative on allocation/debugfs failure.
 */
int msm_perf_debugfs_init(struct drm_minor *minor)
{
	struct msm_drm_private *priv = minor->dev->dev_private;
	struct msm_perf_state *perf;

	/* only create on first minor: */
	if (priv->perf)
		return 0;

	perf = kzalloc(sizeof(*perf), GFP_KERNEL);
	if (!perf)
		return -ENOMEM;

	perf->dev = minor->dev;

	mutex_init(&perf->read_lock);
	priv->perf = perf;

	perf->node = kzalloc(sizeof(*perf->node), GFP_KERNEL);
	if (!perf->node)
		goto fail;

	perf->ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
			minor->debugfs_root, perf, &perf_debugfs_fops);
	if (!perf->ent) {
		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/perf\n",
				minor->debugfs_root->d_name.name);
		goto fail;
	}

	perf->node->minor = minor;
	perf->node->dent  = perf->ent;
	perf->node->info_ent = NULL;

	mutex_lock(&minor->debugfs_lock);
	list_add(&perf->node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;

fail:
	msm_perf_debugfs_cleanup(minor);
	return -1;
}

/* Tear down everything msm_perf_debugfs_init() created; safe on partial init. */
void msm_perf_debugfs_cleanup(struct drm_minor *minor)
{
	struct msm_drm_private *priv = minor->dev->dev_private;
	struct msm_perf_state *perf = priv->perf;

	if (!perf)
		return;

	priv->perf = NULL;

	debugfs_remove(perf->ent);

	if (perf->node) {
		mutex_lock(&minor->debugfs_lock);
		list_del(&perf->node->list);
		mutex_unlock(&minor->debugfs_lock);
		kfree(perf->node);
	}

	mutex_destroy(&perf->read_lock);

	kfree(perf);
}

#endif
gpl-2.0
showp1984/bricked-mako
drivers/net/wireless/ath/ath9k/hw.c
1848
78809
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include "hw.h"
#include "hw-ops.h"
#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"

static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/* Module init/exit are intentionally empty: this module only provides
 * library code to the ath9k drivers, nothing to set up on load. */
static int __init ath9k_init(void)
{
	return 0;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	return;
}
module_exit(ath9k_exit);

/* Private hardware callbacks */

/* Thin dispatch wrappers into the chip-family private ops table. */
static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
{
	ath9k_hw_private_ops(ah)->init_cal_settings(ah);
}

static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
{
	ath9k_hw_private_ops(ah)->init_mode_regs(ah);
}

static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
					struct ath9k_channel *chan)
{
	return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
}

/* Optional callback: not all chip families provide it. */
static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
{
	if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
		return;

	ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
}

static void
ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
{
	/* You will not have this callback if using the old ANI */
	if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
		return;

	ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
}

/********************/
/* Helper Functions */
/********************/

/*
 * Recompute common->clockrate (MAC clock, MHz) from the current channel
 * band/width and chip quirks; used by the usec<->clock conversions below.
 */
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
	struct ath_common *common = ath9k_hw_common(ah);
	unsigned int clockrate;

	/* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
	if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
		clockrate = 117;
	else if (!ah->curchan) /* should really check for CCK instead */
		clockrate = ATH9K_CLOCK_RATE_CCK;
	else if (conf->channel->band == IEEE80211_BAND_2GHZ)
		clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
	else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
		clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
	else
		clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;

	/* HT40 doubles the clock; half/quarter-rate channels divide it */
	if (conf_is_ht40(conf))
		clockrate *= 2;

	if (ah->curchan) {
		if (IS_CHAN_HALF_RATE(ah->curchan))
			clockrate /= 2;
		if (IS_CHAN_QUARTER_RATE(ah->curchan))
			clockrate /= 4;
	}

	common->clockrate = clockrate;
}

/* Convert microseconds to MAC clock ticks at the current clockrate. */
static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
{
	struct ath_common *common = ath9k_hw_common(ah);

	return usecs * common->clockrate;
}

/*
 * Poll @reg until (value & @mask) == @val or @timeout microseconds elapse,
 * checking every AH_TIME_QUANTUM us. Returns true on match, false and logs
 * on timeout.
 */
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
{
	int i;

	BUG_ON(timeout < AH_TIME_QUANTUM);

	for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) {
		if ((REG_READ(ah, reg) & mask) == val)
			return true;

		udelay(AH_TIME_QUANTUM);
	}

	ath_dbg(ath9k_hw_common(ah), ANY,
		"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
		timeout, reg, REG_READ(ah, reg), mask, val);

	return false;
}
EXPORT_SYMBOL(ath9k_hw_wait);

/*
 * Write one column of an initvals array to hardware: for each row, the
 * register address is column 0 and the value is the requested @column.
 * @writecnt is bumped per write for the register-write throttling delay.
 */
void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
			  int column, unsigned int *writecnt)
{
	int r;

	ENABLE_REGWRITE_BUFFER(ah);

	for (r = 0; r < array->ia_rows; r++) {
		REG_WRITE(ah, INI_RA(array, r, 0),
			  INI_RA(array, r, column));
DO_DELAY(*writecnt); } REGWRITE_BUFFER_FLUSH(ah); } u32 ath9k_hw_reverse_bits(u32 val, u32 n) { u32 retval; int i; for (i = 0, retval = 0; i < n; i++) { retval = (retval << 1) | (val & 1); val >>= 1; } return retval; } u16 ath9k_hw_computetxtime(struct ath_hw *ah, u8 phy, int kbps, u32 frameLen, u16 rateix, bool shortPreamble) { u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; if (kbps == 0) return 0; switch (phy) { case WLAN_RC_PHY_CCK: phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS; if (shortPreamble) phyTime >>= 1; numBits = frameLen << 3; txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps); break; case WLAN_RC_PHY_OFDM: if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_QUARTER + OFDM_PREAMBLE_TIME_QUARTER + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); } else if (ah->curchan && IS_CHAN_HALF_RATE(ah->curchan)) { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME_HALF + OFDM_PREAMBLE_TIME_HALF + (numSymbols * OFDM_SYMBOL_TIME_HALF); } else { bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000; numBits = OFDM_PLCP_BITS + (frameLen << 3); numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME + (numSymbols * OFDM_SYMBOL_TIME); } break; default: ath_err(ath9k_hw_common(ah), "Unknown phy %u (rate ix %u)\n", phy, rateix); txTime = 0; break; } return txTime; } EXPORT_SYMBOL(ath9k_hw_computetxtime); void ath9k_hw_get_channel_centers(struct ath_hw *ah, struct ath9k_channel *chan, struct chan_centers *centers) { int8_t extoff; if (!IS_CHAN_HT40(chan)) { centers->ctl_center = centers->ext_center = centers->synth_center = chan->channel; return; } if ((chan->chanmode == CHANNEL_A_HT40PLUS) || (chan->chanmode == 
CHANNEL_G_HT40PLUS)) {
        centers->synth_center =
            chan->channel + HT40_CHANNEL_CENTER_SHIFT;
        extoff = 1;
    } else {
        centers->synth_center =
            chan->channel - HT40_CHANNEL_CENTER_SHIFT;
        extoff = -1;
    }

    centers->ctl_center =
        centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
    /* 25 MHz spacing is supported by hw but not on upper layers */
    centers->ext_center =
        centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT);
}

/******************/
/* Chip Revisions */
/******************/

/*
 * Determine MAC version/revision from the device ID or AR_SREV register,
 * and record whether the device is PCI Express.
 */
static void ath9k_hw_read_revisions(struct ath_hw *ah)
{
    u32 val;

    switch (ah->hw_version.devid) {
    case AR5416_AR9100_DEVID:
        ah->hw_version.macVersion = AR_SREV_VERSION_9100;
        break;
    case AR9300_DEVID_AR9330:
        ah->hw_version.macVersion = AR_SREV_VERSION_9330;
        /* platform may supply the revision via a callback */
        if (ah->get_mac_revision) {
            ah->hw_version.macRev = ah->get_mac_revision();
        } else {
            val = REG_READ(ah, AR_SREV);
            ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
        }
        return;
    case AR9300_DEVID_AR9340:
        ah->hw_version.macVersion = AR_SREV_VERSION_9340;
        val = REG_READ(ah, AR_SREV);
        ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
        return;
    }

    val = REG_READ(ah, AR_SREV) & AR_SREV_ID;

    /* 0xFF in the legacy ID field selects the extended SREV layout */
    if (val == 0xFF) {
        val = REG_READ(ah, AR_SREV);
        ah->hw_version.macVersion =
            (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
        ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);

        if (AR_SREV_9462(ah))
            ah->is_pciexpress = true;
        else
            ah->is_pciexpress = (val &
                         AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
    } else {
        if (!AR_SREV_9100(ah))
            ah->hw_version.macVersion = MS(val, AR_SREV_VERSION);

        ah->hw_version.macRev = val & AR_SREV_REVISION;

        if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
            ah->is_pciexpress = true;
    }
}

/************************************/
/* HW Attach, Detach, Init Routines */
/************************************/

/*
 * Disable the PCI-E PHY with a fixed SerDes programming sequence
 * (applies to AR5416-family parts only).
 */
static void ath9k_hw_disablepcie(struct ath_hw *ah)
{
    if (!AR_SREV_5416(ah))
        return;

    REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
    REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
    REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
    REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
    REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
    REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
    REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
    REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
    REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);

    REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}

/* Delegate ASPM setup to the bus layer, if it provides a hook. */
static void ath9k_hw_aspm_init(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);

    if (common->bus_ops->aspm_init)
        common->bus_ops->aspm_init(common);
}

/* This should work for all families including legacy */
/*
 * Register access sanity test: walk counting values and fixed bit
 * patterns through AR_STA_ID0 (plus one PHY register on pre-AR9300
 * parts), restoring the original register contents afterwards.
 */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);
    u32 regAddr[2] = { AR_STA_ID0 };
    u32 regHold[2];
    static const u32 patternData[4] = {
        0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999
    };
    int i, j, loop_max;

    if (!AR_SREV_9300_20_OR_LATER(ah)) {
        loop_max = 2;
        regAddr[1] = AR_PHY_BASE + (8 << 2);
    } else
        loop_max = 1;

    for (i = 0; i < loop_max; i++) {
        u32 addr = regAddr[i];
        u32 wrData, rdData;

        regHold[i] = REG_READ(ah, addr);
        for (j = 0; j < 0x100; j++) {
            wrData = (j << 16) | j;
            REG_WRITE(ah, addr, wrData);
            rdData = REG_READ(ah, addr);
            if (rdData != wrData) {
                ath_err(common,
                    "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
                    addr, wrData, rdData);
                return false;
            }
        }
        for (j = 0; j < 4; j++) {
            wrData = patternData[j];
            REG_WRITE(ah, addr, wrData);
            rdData = REG_READ(ah, addr);
            if (wrData != rdData) {
                ath_err(common, "address test failed addr: 
0x%08x - wr:0x%08x != rd:0x%08x\n", addr, wrData, rdData);
                return false;
            }
        }
        REG_WRITE(ah, regAddr[i], regHold[i]);
    }
    udelay(100);
    return true;
}

/* Set default driver configuration values (timings, ANI, mitigation). */
static void ath9k_hw_init_config(struct ath_hw *ah)
{
    int i;

    ah->config.dma_beacon_response_time = 2;
    ah->config.sw_beacon_response_time = 10;
    ah->config.additional_swba_backoff = 0;
    ah->config.ack_6mb = 0x0;
    ah->config.cwm_ignore_extcca = 0;
    ah->config.pcie_clock_req = 0;
    ah->config.pcie_waen = 0;
    ah->config.analog_shiftreg = 1;
    ah->config.enable_ani = true;

    for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
        ah->config.spurchans[i][0] = AR_NO_SPUR;
        ah->config.spurchans[i][1] = AR_NO_SPUR;
    }

    /* PAPRD needs some more work to be enabled */
    ah->config.paprd_disable = 1;

    ah->config.rx_intr_mitigation = true;
    ah->config.pcieSerDesWrite = true;

    /*
     * We need this for PCI devices only (Cardbus, PCI, miniPCI)
     * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
     * This means we use it for all AR5416 devices, and the few
     * minor PCI AR9280 devices out there.
     *
     * Serialization is required because these devices do not handle
     * well the case of two concurrent reads/writes due to the latency
     * involved. During one read/write another read/write can be issued
     * on another CPU while the previous read/write may still be working
     * on our hardware, if we hit this case the hardware poops in a loop.
     * We prevent this by serializing reads and writes.
     *
     * This issue is not present on PCI-Express devices or pre-AR5416
     * devices (legacy, 802.11abg).
     */
    if (num_possible_cpus() > 1)
        ah->config.serialize_regmode = SER_REG_MODE_AUTO;
}

/* Set default runtime state (regulatory, STA_ID1 flags, slot time). */
static void ath9k_hw_init_defaults(struct ath_hw *ah)
{
    struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);

    regulatory->country_code = CTRY_DEFAULT;
    regulatory->power_limit = MAX_RATE_POWER;

    ah->hw_version.magic = AR5416_MAGIC;
    ah->hw_version.subvendorid = 0;

    ah->atim_window = 0;
    ah->sta_id1_defaults =
        AR_STA_ID1_CRPT_MIC_ENABLE |
        AR_STA_ID1_MCAST_KSRCH;
    if (AR_SREV_9100(ah))
        ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
    ah->enable_32kHz_clock = DONT_USE_32KHZ;
    ah->slottime = ATH9K_SLOT_TIME_9;
    ah->globaltxtimeout = (u32) -1;
    ah->power_mode = ATH9K_PM_UNDEFINED;
    ah->htc_reset_init = true;
}

/*
 * Read the MAC address from EEPROM (three 16-bit words).
 * Returns -EADDRNOTAVAIL if the address is all-zero or all-ones.
 */
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);
    u32 sum;
    int i;
    u16 eeval;
    static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };

    sum = 0;
    for (i = 0; i < 3; i++) {
        eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
        sum += eeval;
        common->macaddr[2 * i] = eeval >> 8;
        common->macaddr[2 * i + 1] = eeval & 0xff;
    }
    if (sum == 0 || sum == 0xffff * 3)
        return -EADDRNOTAVAIL;

    return 0;
}

/*
 * Post-reset init: chip test (non-USB), RF claim (pre-AR9300), EEPROM
 * init, external radio bank allocation and ANI setup.
 */
static int ath9k_hw_post_init(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);
    int ecode;

    if (common->bus_ops->ath_bus_type != ATH_USB) {
        if (!ath9k_hw_chip_test(ah))
            return -ENODEV;
    }

    if (!AR_SREV_9300_20_OR_LATER(ah)) {
        ecode = ar9002_hw_rf_claim(ah);
        if (ecode != 0)
            return ecode;
    }

    ecode = ath9k_hw_eeprom_init(ah);
    if (ecode != 0)
        return ecode;

    ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n",
        ah->eep_ops->get_eeprom_ver(ah),
        ah->eep_ops->get_eeprom_rev(ah));

    ecode = ath9k_hw_rf_alloc_ext_banks(ah);
    if (ecode) {
        ath_err(ath9k_hw_common(ah),
            "Failed allocating banks for external radio\n");
        ath9k_hw_rf_free_ext_banks(ah);
        return ecode;
    }

    if (ah->config.enable_ani) {
        ath9k_hw_ani_setup(ah);
        ath9k_hw_ani_init(ah);
    }

    return 0;
}

/* Select the AR9002 or AR9003 family hardware ops vector. */
static void ath9k_hw_attach_ops(struct ath_hw *ah)
{
    if
(AR_SREV_9300_20_OR_LATER(ah))
        ar9003_hw_attach_ops(ah);
    else
        ar9002_hw_attach_ops(ah);
}

/* Called for all hardware families */
static int __ath9k_hw_init(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);
    int r = 0;

    ath9k_hw_read_revisions(ah);

    /*
     * Read back AR_WA into a permanent copy and set bits 14 and 17.
     * We need to do this to avoid RMW of this register. We cannot
     * read the reg when chip is asleep.
     */
    ah->WARegVal = REG_READ(ah, AR_WA);
    ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
             AR_WA_ASPM_TIMER_BASED_DISABLE);

    if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
        ath_err(common, "Couldn't reset chip\n");
        return -EIO;
    }

    if (AR_SREV_9462(ah))
        ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;

    ath9k_hw_init_defaults(ah);
    ath9k_hw_init_config(ah);

    ath9k_hw_attach_ops(ah);

    if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
        ath_err(common, "Couldn't wakeup chip\n");
        return -EIO;
    }

    /* resolve SER_REG_MODE_AUTO to on/off based on bus/chip type */
    if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
        if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
            ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
             !ah->is_pciexpress)) {
            ah->config.serialize_regmode =
                SER_REG_MODE_ON;
        } else {
            ah->config.serialize_regmode =
                SER_REG_MODE_OFF;
        }
    }

    ath_dbg(common, RESET, "serialize_regmode is %d\n",
        ah->config.serialize_regmode);

    /* 9285/9271 use a smaller TX FIFO trigger threshold */
    if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
        ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
    else
        ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;

    switch (ah->hw_version.macVersion) {
    case AR_SREV_VERSION_5416_PCI:
    case AR_SREV_VERSION_5416_PCIE:
    case AR_SREV_VERSION_9160:
    case AR_SREV_VERSION_9100:
    case AR_SREV_VERSION_9280:
    case AR_SREV_VERSION_9285:
    case AR_SREV_VERSION_9287:
    case AR_SREV_VERSION_9271:
    case AR_SREV_VERSION_9300:
    case AR_SREV_VERSION_9330:
    case AR_SREV_VERSION_9485:
    case AR_SREV_VERSION_9340:
    case AR_SREV_VERSION_9462:
        break;
    default:
        ath_err(common,
            "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
            ah->hw_version.macVersion, ah->hw_version.macRev);
        return -EOPNOTSUPP;
    }

    if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
        AR_SREV_9330(ah))
        ah->is_pciexpress = false;

    ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
    ath9k_hw_init_cal_settings(ah);

    ah->ani_function = ATH9K_ANI_ALL;
    if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
        ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
    if (!AR_SREV_9300_20_OR_LATER(ah))
        ah->ani_function &= ~ATH9K_ANI_MRC_CCK;

    /* disable ANI for 9340 */
    if (AR_SREV_9340(ah))
        ah->config.enable_ani = false;

    ath9k_hw_init_mode_regs(ah);

    if (!ah->is_pciexpress)
        ath9k_hw_disablepcie(ah);

    r = ath9k_hw_post_init(ah);
    if (r)
        return r;

    ath9k_hw_init_mode_gain_regs(ah);
    r = ath9k_hw_fill_cap_info(ah);
    if (r)
        return r;

    if (ah->is_pciexpress)
        ath9k_hw_aspm_init(ah);

    r = ath9k_hw_init_macaddr(ah);
    if (r) {
        ath_err(common, "Failed to initialize MAC address\n");
        return r;
    }

    if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
        ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
    else
        ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);

    if (AR_SREV_9330(ah))
        ah->bb_watchdog_timeout_ms = 85;
    else
        ah->bb_watchdog_timeout_ms = 25;

    common->state = ATH_HW_INITIALIZED;

    return 0;
}

/*
 * Public entry point: validate the PCI/USB device ID, then run the
 * family-independent __ath9k_hw_init() sequence.
 */
int ath9k_hw_init(struct ath_hw *ah)
{
    int ret;
    struct ath_common *common = ath9k_hw_common(ah);

    /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
    switch (ah->hw_version.devid) {
    case AR5416_DEVID_PCI:
    case AR5416_DEVID_PCIE:
    case AR5416_AR9100_DEVID:
    case AR9160_DEVID_PCI:
    case AR9280_DEVID_PCI:
    case AR9280_DEVID_PCIE:
    case AR9285_DEVID_PCIE:
    case AR9287_DEVID_PCI:
    case AR9287_DEVID_PCIE:
    case AR2427_DEVID_PCIE:
    case AR9300_DEVID_PCIE:
    case AR9300_DEVID_AR9485_PCIE:
    case AR9300_DEVID_AR9330:
    case AR9300_DEVID_AR9340:
    case AR9300_DEVID_AR9580:
    case AR9300_DEVID_AR9462:
        break;
    default:
        if (common->bus_ops->ath_bus_type == ATH_USB)
            break;
        ath_err(common, "Hardware device ID 0x%04x not supported\n",
            ah->hw_version.devid);
        return -EOPNOTSUPP;
    }

    ret = __ath9k_hw_init(ah);
    if (ret) {
        ath_err(common, 
"Unable to initialize hardware; initialization status: %d\n",
            ret);
        return ret;
    }

    return 0;
}
EXPORT_SYMBOL(ath9k_hw_init);

/* Program static QoS and TXOP limit registers. */
static void ath9k_hw_init_qos(struct ath_hw *ah)
{
    ENABLE_REGWRITE_BUFFER(ah);

    REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
    REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);

    REG_WRITE(ah, AR_QOS_NO_ACK,
          SM(2, AR_QOS_NO_ACK_TWO_BIT) |
          SM(5, AR_QOS_NO_ACK_BIT_OFF) |
          SM(0, AR_QOS_NO_ACK_BYTE_OFF));

    REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
    REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
    REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
    REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
    REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);

    REGWRITE_BUFFER_FLUSH(ah);
}

/*
 * Trigger a PLL measurement and return the SQSUM_DVC reading.
 * NOTE(review): busy-waits on PLL4_MEAS_DONE with no timeout.
 */
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
{
    REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
    udelay(100);
    REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);

    while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
        udelay(100);

    return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
}
EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);

/*
 * Chip-specific PLL bring-up (AR9485/AR9330/AR9340 need extra DPLL
 * programming), then write the computed RTC PLL control value.
 */
static void ath9k_hw_init_pll(struct ath_hw *ah,
                  struct ath9k_channel *chan)
{
    u32 pll;

    if (AR_SREV_9485(ah)) {

        /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                  AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                  AR_CH0_DPLL2_KD, 0x40);
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                  AR_CH0_DPLL2_KI, 0x4);

        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
                  AR_CH0_BB_DPLL1_REFDIV, 0x5);
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
                  AR_CH0_BB_DPLL1_NINI, 0x58);
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
                  AR_CH0_BB_DPLL1_NFRAC, 0x0);

        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                  AR_CH0_BB_DPLL2_OUTDIV, 0x1);
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                  AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1);
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                  AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1);

        /* program BB PLL phase_shift to 0x6 */
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
                  AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6);

        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
                  AR_CH0_BB_DPLL2_PLL_PWD, 0x0);
        udelay(1000);
    } else if (AR_SREV_9330(ah)) {
        u32 ddr_dpll2, pll_control2, kd;

        /* constants differ for 25 MHz vs 40 MHz reference clock */
        if (ah->is_clk_25mhz) {
            ddr_dpll2 = 0x18e82f01;
            pll_control2 = 0xe04a3d;
            kd = 0x1d;
        } else {
            ddr_dpll2 = 0x19e82f01;
            pll_control2 = 0x886666;
            kd = 0x3d;
        }

        /* program DDR PLL ki and kd value */
        REG_WRITE(ah, AR_CH0_DDR_DPLL2, ddr_dpll2);

        /* program DDR PLL phase_shift */
        REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
                  AR_CH0_DPLL3_PHASE_SHIFT, 0x1);

        REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
        udelay(1000);

        /* program refdiv, nint, frac to RTC register */
        REG_WRITE(ah, AR_RTC_PLL_CONTROL2, pll_control2);

        /* program BB PLL kd and ki value */
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KD, kd);
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KI, 0x06);

        /* program BB PLL phase_shift */
        REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
                  AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
    } else if (AR_SREV_9340(ah)) {
        u32 regval, pll2_divint, pll2_divfrac, refdiv;

        REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
        udelay(1000);

        REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16);
        udelay(100);

        if (ah->is_clk_25mhz) {
            pll2_divint = 0x54;
            pll2_divfrac = 0x1eb85;
            refdiv = 3;
        } else {
            pll2_divint = 88;
            pll2_divfrac = 0;
            refdiv = 5;
        }

        regval = REG_READ(ah, AR_PHY_PLL_MODE);
        regval |= (0x1 << 16);
        REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
        udelay(100);

        REG_WRITE(ah, AR_PHY_PLL_CONTROL, (refdiv << 27) |
              (pll2_divint << 18) | pll2_divfrac);
        udelay(100);

        regval = REG_READ(ah, AR_PHY_PLL_MODE);
        regval = (regval & 0x80071fff) | (0x1 << 30) |
             (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
        REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
        REG_WRITE(ah, AR_PHY_PLL_MODE,
              REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
        udelay(1000);
    }

    pll = ath9k_hw_compute_pll_control(ah, chan);

    REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);

    if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
        udelay(1000);

    /* Switch the core clock for ar9271 to 117Mhz */
    if (AR_SREV_9271(ah)) {
        udelay(500);
        REG_WRITE(ah, 0x50040, 0x304);
    }

    udelay(RTC_PLL_SETTLE_DELAY);

    REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);

    if (AR_SREV_9340(ah)) {
        if (ah->is_clk_25mhz) {
            REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
            REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
            REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
        } else {
            REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
            REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
            REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
        }
        udelay(100);
    }
}

/* Build and program the default interrupt masks for 'opmode'. */
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
                      enum nl80211_iftype opmode)
{
    u32 sync_default = AR_INTR_SYNC_DEFAULT;
    u32 imr_reg = AR_IMR_TXERR |
        AR_IMR_TXURN |
        AR_IMR_RXERR |
        AR_IMR_RXORN |
        AR_IMR_BCNMISC;

    if (AR_SREV_9340(ah))
        sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

    if (AR_SREV_9300_20_OR_LATER(ah)) {
        imr_reg |= AR_IMR_RXOK_HP;
        if (ah->config.rx_intr_mitigation)
            imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
        else
            imr_reg |= AR_IMR_RXOK_LP;
    } else {
        if (ah->config.rx_intr_mitigation)
            imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
        else
            imr_reg |= AR_IMR_RXOK;
    }

    if (ah->config.tx_intr_mitigation)
        imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
    else
        imr_reg |= AR_IMR_TXOK;

    if (opmode == NL80211_IFTYPE_AP)
        imr_reg |= AR_IMR_MIB;

    ENABLE_REGWRITE_BUFFER(ah);

    REG_WRITE(ah, AR_IMR, imr_reg);
    ah->imrs2_reg |= AR_IMR_S2_GTT;
    REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

    if (!AR_SREV_9100(ah)) {
        REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
        REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
        REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
    }

    REGWRITE_BUFFER_FLUSH(ah);

    if (AR_SREV_9300_20_OR_LATER(ah)) {
        REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
        REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
        REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
        REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
    }
}

/* SIFS register takes (us - 2) converted to core clocks, capped at 16 bits. */
static void ath9k_hw_set_sifs_time(struct ath_hw *ah, u32 us)
{
    u32 val = ath9k_hw_mac_to_clks(ah, us - 2);
    val = min(val, (u32) 0xFFFF);
    REG_WRITE(ah, AR_D_GBL_IFS_SIFS, val);
}

/* Program the slot time (usec), converted to core clocks. */
static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
{
    u32 val = ath9k_hw_mac_to_clks(ah, us);
    val = min(val, (u32) 0xFFFF);
    REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
}

/* Program the ACK timeout (usec), capped at the field width. */
static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
{
    u32 val = ath9k_hw_mac_to_clks(ah, us);
    val = min(val, (u32) MS(0xFFFFFFFF, 
AR_TIME_OUT_ACK));
    REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
}

/* Program the CTS timeout (usec), capped at the field width. */
static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
{
    u32 val = ath9k_hw_mac_to_clks(ah, us);
    val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
    REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
}

/*
 * Set the global TX timeout in TUs. Returns false (and records the
 * timeout as disabled) if 'tu' exceeds the 16-bit limit.
 */
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
{
    if (tu > 0xFFFF) {
        ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n",
            tu);
        ah->globaltxtimeout = (u32) -1;
        return false;
    } else {
        REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
        ah->globaltxtimeout = tu;
        return true;
    }
}

/*
 * Program slot, SIFS, ACK/CTS timeouts, EIFS and RX/TX latencies for
 * the current channel; half/quarter-rate channels scale the values.
 */
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);
    struct ieee80211_conf *conf = &common->hw->conf;
    const struct ath9k_channel *chan = ah->curchan;
    int acktimeout, ctstimeout;
    int slottime;
    int sifstime;
    int rx_lat = 0, tx_lat = 0, eifs = 0;
    u32 reg;

    ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n",
        ah->misc_mode);

    if (!chan)
        return;

    if (ah->misc_mode != 0)
        REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);

    if (IS_CHAN_A_FAST_CLOCK(ah, chan))
        rx_lat = 41;
    else
        rx_lat = 37;
    tx_lat = 54;

    if (IS_CHAN_HALF_RATE(chan)) {
        eifs = 175;
        rx_lat *= 2;
        tx_lat *= 2;
        if (IS_CHAN_A_FAST_CLOCK(ah, chan))
            tx_lat += 11;

        slottime = 13;
        sifstime = 32;
    } else if (IS_CHAN_QUARTER_RATE(chan)) {
        eifs = 340;
        rx_lat = (rx_lat * 4) - 1;
        tx_lat *= 4;
        if (IS_CHAN_A_FAST_CLOCK(ah, chan))
            tx_lat += 22;

        slottime = 21;
        sifstime = 64;
    } else {
        if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
            eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
            reg = AR_USEC_ASYNC_FIFO;
        } else {
            eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/
                common->clockrate;
            reg = REG_READ(ah, AR_USEC);
        }
        rx_lat = MS(reg, AR_USEC_RX_LAT);
        tx_lat = MS(reg, AR_USEC_TX_LAT);

        slottime = ah->slottime;
        if (IS_CHAN_5GHZ(chan))
            sifstime = 16;
        else
            sifstime = 10;
    }

    /* As defined by IEEE 802.11-2007 17.3.8.6 */
    acktimeout = slottime + sifstime + 3 * ah->coverage_class;
    ctstimeout = acktimeout;

    /*
     * Workaround for early ACK timeouts, add an offset to match the
     * initval's 64us ack timeout value. Use 48us for the CTS timeout.
     * This was initially only meant to work around an issue with delayed
     * BA frames in some implementations, but it has been found to fix ACK
     * timeout issues in other cases as well.
     */
    if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) {
        acktimeout += 64 - sifstime - ah->slottime;
        ctstimeout += 48 - sifstime - ah->slottime;
    }

    ath9k_hw_set_sifs_time(ah, sifstime);
    ath9k_hw_setslottime(ah, slottime);
    ath9k_hw_set_ack_timeout(ah, acktimeout);
    ath9k_hw_set_cts_timeout(ah, ctstimeout);
    if (ah->globaltxtimeout != (u32) -1)
        ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);

    REG_WRITE(ah, AR_D_GBL_IFS_EIFS, ath9k_hw_mac_to_clks(ah, eifs));
    REG_RMW(ah, AR_USEC,
        (common->clockrate - 1) |
        SM(rx_lat, AR_USEC_RX_LAT) |
        SM(tx_lat, AR_USEC_TX_LAT),
        AR_USEC_TX_LAT | AR_USEC_RX_LAT | AR_USEC_USEC);
}
EXPORT_SYMBOL(ath9k_hw_init_global_settings);

/* Put the chip to full sleep and release the external radio banks. */
void ath9k_hw_deinit(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);

    if (common->state < ATH_HW_INITIALIZED)
        goto free_hw;

    ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);

free_hw:
    ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);

/*******/
/* INI */
/*******/

/* Map a channel to its regulatory CTL mode (11A/11B/11G). */
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
    u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);

    if (IS_CHAN_B(chan))
        ctl |= CTL_11B;
    else if (IS_CHAN_G(chan))
        ctl |= CTL_11G;
    else
        ctl |= CTL_11A;

    return ctl;
}

/****************************************/
/* Reset and Channel Switching Routines */
/****************************************/

/* Configure MAC DMA: chunk sizes, trigger level, RX FIFO thresholds. */
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);

    ENABLE_REGWRITE_BUFFER(ah);

    /*
     * set AHB_MODE not to do cacheline prefetches
     */
    if (!AR_SREV_9300_20_OR_LATER(ah))
        REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);

    /*
     * let mac dma reads be in 128 byte chunks
     */
    REG_RMW(ah, 
AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);

    REGWRITE_BUFFER_FLUSH(ah);

    /*
     * Restore TX Trigger Level to its pre-reset value.
     * The initial value depends on whether aggregation is enabled, and is
     * adjusted whenever underruns are detected.
     */
    if (!AR_SREV_9300_20_OR_LATER(ah))
        REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);

    ENABLE_REGWRITE_BUFFER(ah);

    /*
     * let mac dma writes be in 128 byte chunks
     */
    REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);

    /*
     * Setup receive FIFO threshold to hold off TX activities
     */
    REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);

    if (AR_SREV_9300_20_OR_LATER(ah)) {
        REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
        REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);

        ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
            ah->caps.rx_status_len);
    }

    /*
     * reduce the number of usable entries in PCU TXBUF to avoid
     * wrap around issues.
     */
    if (AR_SREV_9285(ah)) {
        /* For AR9285 the number of Fifos are reduced to half.
         * So set the usable tx buf size also to half to
         * avoid data/delimiter underruns
         */
        REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
              AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
    } else if (!AR_SREV_9271(ah)) {
        REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
              AR_PCU_TXBUF_CTRL_USABLE_SIZE);
    }

    REGWRITE_BUFFER_FLUSH(ah);

    if (AR_SREV_9300_20_OR_LATER(ah))
        ath9k_hw_reset_txstatus_ring(ah);
}

/* Set STA/AP/ad-hoc bits in AR_STA_ID1 for the given interface type. */
static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
{
    u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
    u32 set = AR_STA_ID1_KSRCH_MODE;

    switch (opmode) {
    case NL80211_IFTYPE_ADHOC:
    case NL80211_IFTYPE_MESH_POINT:
        set |= AR_STA_ID1_ADHOC;
        REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
        break;
    case NL80211_IFTYPE_AP:
        set |= AR_STA_ID1_STA_AP;
        /* fall through */
    case NL80211_IFTYPE_STATION:
        REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
        break;
    default:
        if (!ah->is_monitoring)
            set = 0;
        break;
    }
    REG_RMW(ah, AR_STA_ID1, set, mask);
}

/* Split a scaled delta-slope coefficient into mantissa and exponent. */
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
                   u32 *coef_mantissa, u32 *coef_exponent)
{
    u32 coef_exp, coef_man;

    /* find the position of the highest set bit */
    for (coef_exp = 31; coef_exp > 0; coef_exp--)
        if ((coef_scaled >> coef_exp) & 0x1)
            break;

    coef_exp = 14 - (coef_exp - COEF_SCALE_S);

    /* round up before truncating the mantissa */
    coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));

    *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
    *coef_exponent = coef_exp - 16;
}

/* Warm or cold MAC reset via the RTC reset-control register. */
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
    u32 rst_flags;
    u32 tmpReg;

    if (AR_SREV_9100(ah)) {
        REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
                  AR_RTC_DERIVED_CLK_PERIOD, 1);
        (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
    }

    ENABLE_REGWRITE_BUFFER(ah);

    if (AR_SREV_9300_20_OR_LATER(ah)) {
        REG_WRITE(ah, AR_WA, ah->WARegVal);
        udelay(10);
    }

    REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
          AR_RTC_FORCE_WAKE_ON_INT);

    if (AR_SREV_9100(ah)) {
        rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
            AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
    } else {
        tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
        if (tmpReg &
            (AR_INTR_SYNC_LOCAL_TIMEOUT |
             AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
            u32 val;
            REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);

            val = AR_RC_HOSTIF;
            if (!AR_SREV_9300_20_OR_LATER(ah))
                val |= AR_RC_AHB;
            REG_WRITE(ah, AR_RC, val);

        } else if (!AR_SREV_9300_20_OR_LATER(ah))
            REG_WRITE(ah, AR_RC, AR_RC_AHB);

        rst_flags = AR_RTC_RC_MAC_WARM;
        if (type == ATH9K_RESET_COLD)
            rst_flags |= AR_RTC_RC_MAC_COLD;
    }

    if (AR_SREV_9330(ah)) {
        int npend = 0;
        int i;

        /* AR9330 WAR:
         * call external reset function to reset WMAC if:
         * - doing a cold reset
         * - we have pending frames in the TX queues
         */

        for (i = 0; i < AR_NUM_QCU; i++) {
            npend = ath9k_hw_numtxpending(ah, i);
            if (npend)
                break;
        }

        if (ah->external_reset &&
            (npend || type == ATH9K_RESET_COLD)) {
            int reset_err = 0;

            ath_dbg(ath9k_hw_common(ah), RESET,
                "reset MAC via external reset\n");

            reset_err = ah->external_reset();
            if (reset_err) {
                ath_err(ath9k_hw_common(ah),
                    "External reset failed, err=%d\n",
                    reset_err);
                return false;
            }

            REG_WRITE(ah, AR_RTC_RESET, 1);
        }
    }

    REG_WRITE(ah, AR_RTC_RC, rst_flags);

    REGWRITE_BUFFER_FLUSH(ah);

    udelay(50);

    REG_WRITE(ah, AR_RTC_RC, 0);
    if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
        ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
        return false;
    }

    if (!AR_SREV_9100(ah))
        REG_WRITE(ah, AR_RC, 0);

    if (AR_SREV_9100(ah))
        udelay(50);

    return true;
}

/* Power-on reset: pulse AR_RTC_RESET, wait for RTC, then warm reset. */
static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
    ENABLE_REGWRITE_BUFFER(ah);

    if (AR_SREV_9300_20_OR_LATER(ah)) {
        REG_WRITE(ah, AR_WA, ah->WARegVal);
        udelay(10);
    }

    REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
          AR_RTC_FORCE_WAKE_ON_INT);

    if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
        REG_WRITE(ah, AR_RC, AR_RC_AHB);

    REG_WRITE(ah, AR_RTC_RESET, 0);

    REGWRITE_BUFFER_FLUSH(ah);

    if (!AR_SREV_9300_20_OR_LATER(ah))
        udelay(2);

    if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
        REG_WRITE(ah, AR_RC, 0);

    REG_WRITE(ah, AR_RTC_RESET, 1);

    if (!ath9k_hw_wait(ah,
               AR_RTC_STATUS,
               AR_RTC_STATUS_M,
               AR_RTC_STATUS_ON,
               AH_WAIT_TIMEOUT)) {
        ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
        return false;
    }

    return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
}

/* Dispatch to the appropriate reset routine for 'type'. */
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
    bool ret = false;

    if (AR_SREV_9300_20_OR_LATER(ah)) {
        REG_WRITE(ah, AR_WA, ah->WARegVal);
        udelay(10);
    }

    REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
          AR_RTC_FORCE_WAKE_ON_INT);

    switch (type) {
    case ATH9K_RESET_POWER_ON:
        ret = ath9k_hw_set_reset_power_on(ah);
        break;
    case ATH9K_RESET_WARM:
    case ATH9K_RESET_COLD:
        ret = ath9k_hw_set_reset(ah, type);
        break;
    default:
        break;
    }

    if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
        REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);

    return ret;
}

/* Reset the chip and bring the PLL/RF mode up for 'chan'. */
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
                struct ath9k_channel *chan)
{
    int reset_type = ATH9K_RESET_WARM;

    if (AR_SREV_9280(ah)) {
        if (ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
            reset_type = ATH9K_RESET_POWER_ON;
        else
            reset_type = ATH9K_RESET_COLD;
    }

    if (!ath9k_hw_set_reset_reg(ah, reset_type))
        return false;

    if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
        return false;
ah->chip_fullsleep = false;
    ath9k_hw_init_pll(ah, chan);
    ath9k_hw_set_rfmode(ah, chan);

    return true;
}

/*
 * Fast channel change: reprogram the synthesizer and channel-dependent
 * registers without a full chip reset. Returns false if frames are
 * still pending on any TX queue or the baseband cannot be stopped.
 */
static bool ath9k_hw_channel_change(struct ath_hw *ah,
                    struct ath9k_channel *chan)
{
    struct ath_common *common = ath9k_hw_common(ah);
    u32 qnum;
    int r;
    bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
    bool band_switch, mode_diff;
    u8 ini_reloaded;

    band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) !=
              (ah->curchan->channelFlags & (CHANNEL_2GHZ |
                            CHANNEL_5GHZ));
    mode_diff = (chan->chanmode != ah->curchan->chanmode);

    for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
        if (ath9k_hw_numtxpending(ah, qnum)) {
            ath_dbg(common, QUEUE,
                "Transmit frames pending on queue %d\n", qnum);
            return false;
        }
    }

    if (!ath9k_hw_rfbus_req(ah)) {
        ath_err(common, "Could not kill baseband RX\n");
        return false;
    }

    if (edma && (band_switch || mode_diff)) {
        ath9k_hw_mark_phy_inactive(ah);
        udelay(5);

        ath9k_hw_init_pll(ah, NULL);

        if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
            ath_err(common, "Failed to do fast channel change\n");
            return false;
        }
    }

    ath9k_hw_set_channel_regs(ah, chan);

    r = ath9k_hw_rf_set_freq(ah, chan);
    if (r) {
        ath_err(common, "Failed to set channel\n");
        return false;
    }
    ath9k_hw_set_clockrate(ah);
    ath9k_hw_apply_txpower(ah, chan, false);
    ath9k_hw_rfbus_done(ah);

    if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
        ath9k_hw_set_delta_slope(ah, chan);

    ath9k_hw_spur_mitigate_freq(ah, chan);

    if (edma && (band_switch || mode_diff)) {
        ah->ah_flags |= AH_FASTCC;
        if (band_switch || ini_reloaded)
            ah->eep_ops->set_board_values(ah, chan);

        ath9k_hw_init_bb(ah, chan);

        if (band_switch || ini_reloaded)
            ath9k_hw_init_cal(ah, chan);
        ah->ah_flags &= ~AH_FASTCC;
    }

    return true;
}

/* Drive the caller-configured GPIO override mask/values. */
static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
{
    u32 gpio_mask = ah->gpio_mask;
    int i;

    for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
        if (!(gpio_mask & 1))
            continue;

        ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
        ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
    }
}

/* Poll the observation bus to decide whether the MAC is still alive. */
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
    int count = 50;
    u32 reg;

    if (AR_SREV_9285_12_OR_LATER(ah))
        return true;

    do {
        reg = REG_READ(ah, AR_OBS_BUS_1);

        if ((reg & 0x7E7FFFEF) == 0x00702400)
            continue;

        switch (reg & 0x7E000B00) {
        case 0x1E000000:
        case 0x52000B00:
        case 0x18000B00:
            continue;
        default:
            return true;
        }
    } while (count-- > 0);

    return false;
}
EXPORT_SYMBOL(ath9k_hw_check_alive);

/*
 * Fast channel change:
 * (Change synthesizer based on channel freq without resetting chip)
 *
 * Don't do FCC when
 *   - Flag is not set
 *   - Chip is just coming out of full sleep
 *   - Channel to be set is same as current channel
 *   - Channel flags are different, (eg.,moving from 2GHz to 5GHz channel)
 */
static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
{
    struct ath_common *common = ath9k_hw_common(ah);
    int ret;

    if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
        goto fail;

    if (ah->chip_fullsleep)
        goto fail;

    if (!ah->curchan)
        goto fail;

    if (chan->channel == ah->curchan->channel)
        goto fail;

    if ((chan->channelFlags & CHANNEL_ALL) !=
        (ah->curchan->channelFlags & CHANNEL_ALL))
        goto fail;

    if (!ath9k_hw_check_alive(ah))
        goto fail;

    /*
     * For AR9462, make sure that calibration data for
     * re-using are present.
     */
    if (AR_SREV_9462(ah) && (!ah->caldata ||
                 !ah->caldata->done_txiqcal_once ||
                 !ah->caldata->done_txclcal_once ||
                 !ah->caldata->rtt_hist.num_readings))
        goto fail;

    ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
        ah->curchan->channel, chan->channel);

    ret = ath9k_hw_channel_change(ah, chan);
    if (!ret)
        goto fail;

    ath9k_hw_loadnf(ah, ah->curchan);
    ath9k_hw_start_nfcal(ah, true);

    if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah))
        ar9003_mci_2g5g_switch(ah, true);

    if (AR_SREV_9271(ah))
        ar9002_hw_load_ani_reg(ah, chan);

    return 0;
fail:
    return -EINVAL;
}

/*
 * Full chip reset and (re)configuration for 'chan'. Tries the fast
 * channel change path first when 'fastcc' is set; preserves TSF, LED
 * state and default antenna across the reset.
 */
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
           struct ath9k_hw_cal_data *caldata, bool fastcc)
{
    struct ath_common *common = ath9k_hw_common(ah);
    u32 saveLedState;
    u32 saveDefAntenna;
    u32 macStaId1;
    u64 tsf = 0;
    int i, r;
    bool start_mci_reset = false;
    bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
    bool save_fullsleep = ah->chip_fullsleep;

    if (mci) {
        start_mci_reset = ar9003_mci_start_reset(ah, chan);
        if (start_mci_reset)
            return 0;
    }

    if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
        return -EIO;

    if (ah->curchan && !ah->chip_fullsleep)
        ath9k_hw_getnf(ah, ah->curchan);

    ah->caldata = caldata;
    if (caldata &&
        (chan->channel != caldata->channel ||
         (chan->channelFlags & ~CHANNEL_CW_INT) !=
         (caldata->channelFlags & ~CHANNEL_CW_INT))) {
        /* Operating channel changed, reset channel calibration data */
        memset(caldata, 0, sizeof(*caldata));
        ath9k_init_nfcal_hist_buffer(ah, chan);
    }
    ah->noise = ath9k_hw_getchan_noise(ah, chan);

    if (fastcc) {
        r = ath9k_hw_do_fastcc(ah, chan);
        if (!r)
            return r;
    }

    if (mci)
        ar9003_mci_stop_bt(ah, save_fullsleep);

    saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
    if (saveDefAntenna == 0)
        saveDefAntenna = 1;

    macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;

    /* For chips on which RTC reset is done, save TSF before it gets cleared */
    if (AR_SREV_9100(ah) ||
        (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
        tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
        (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
         AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);

    ath9k_hw_mark_phy_inactive(ah);

    ah->paprd_table_write_done = false;

    /* Only required on the first reset */
    if (AR_SREV_9271(ah) && ah->htc_reset_init) {
        REG_WRITE(ah,
              AR9271_RESET_POWER_DOWN_CONTROL,
              AR9271_RADIO_RF_RST);
        udelay(50);
    }

    if (!ath9k_hw_chip_reset(ah, chan)) {
        ath_err(common, "Chip reset failed\n");
        return -EINVAL;
    }

    /* Only required on the first reset */
    if (AR_SREV_9271(ah) && ah->htc_reset_init) {
        ah->htc_reset_init = false;
        REG_WRITE(ah,
              AR9271_RESET_POWER_DOWN_CONTROL,
              AR9271_GATE_MAC_CTL);
        udelay(50);
    }

    /* Restore TSF */
    if (tsf)
        ath9k_hw_settsf64(ah, tsf);

    if (AR_SREV_9280_20_OR_LATER(ah))
        REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);

    if (!AR_SREV_9300_20_OR_LATER(ah))
        ar9002_hw_enable_async_fifo(ah);

    r = ath9k_hw_process_ini(ah, chan);
    if (r)
        return r;

    if (mci)
        ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);

    /*
     * Some AR91xx SoC devices frequently fail to accept TSF writes
     * right after the chip reset. When that happens, write a new
     * value after the initvals have been applied, with an offset
     * based on measured time difference
     */
    if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
        tsf += 1500;
        ath9k_hw_settsf64(ah, tsf);
    }

    /* Setup MFP options for CCMP */
    if (AR_SREV_9280_20_OR_LATER(ah)) {
        /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
         * frames when constructing CCMP AAD. */
        REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
                  0xc7ff);
        ah->sw_mgmt_crypto = false;
    } else if (AR_SREV_9160_10_OR_LATER(ah)) {
        /* Disable hardware crypto for management frames */
        REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
                AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
        REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
                AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
        ah->sw_mgmt_crypto = true;
    } else
        ah->sw_mgmt_crypto = true;

    if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
        ath9k_hw_set_delta_slope(ah, chan);

    ath9k_hw_spur_mitigate_freq(ah, chan);
    ah->eep_ops->set_board_values(ah, chan);

    ENABLE_REGWRITE_BUFFER(ah);

    REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
    REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
          | macStaId1
          | AR_STA_ID1_RTS_USE_DEF
          | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
          | ah->sta_id1_defaults);
    ath_hw_setbssidmask(common);
    REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
    ath9k_hw_write_associd(ah);
    REG_WRITE(ah, AR_ISR, ~0);
    REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);

    REGWRITE_BUFFER_FLUSH(ah);

    ath9k_hw_set_operating_mode(ah, ah->opmode);

    r = ath9k_hw_rf_set_freq(ah, chan);
    if (r)
        return r;

    ath9k_hw_set_clockrate(ah);

    ENABLE_REGWRITE_BUFFER(ah);

    for (i = 0; i < AR_NUM_DCU; i++)
        REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);

    REGWRITE_BUFFER_FLUSH(ah);

    ah->intr_txqs = 0;
    for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
        ath9k_hw_resettxqueue(ah, i);

    ath9k_hw_init_interrupt_masks(ah, ah->opmode);
    ath9k_hw_ani_cache_ini_regs(ah);
    ath9k_hw_init_qos(ah);

    if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
        ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);

    ath9k_hw_init_global_settings(ah);

    if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
        REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
                AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
        REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
                  AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
        REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
                AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
    }

    REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);

    ath9k_hw_set_dma(ah);

    REG_WRITE(ah, AR_OBS, 8);

    if (ah->config.rx_intr_mitigation) {
        REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
        REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
    }

    if (ah->config.tx_intr_mitigation) {
        REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
        REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
    }

    ath9k_hw_init_bb(ah, chan);

    if (caldata) {
        caldata->done_txiqcal_once = false;
        caldata->done_txclcal_once = false;
        caldata->rtt_hist.num_readings = 0;
    }
    if (!ath9k_hw_init_cal(ah, chan))
        return -EIO;

    ath9k_hw_loadnf(ah, chan);
    ath9k_hw_start_nfcal(ah, true);

    if (mci && ar9003_mci_end_reset(ah, chan, caldata))
        return -EIO;

    ENABLE_REGWRITE_BUFFER(ah);

    ath9k_hw_restore_chainmask(ah);
    REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);

    REGWRITE_BUFFER_FLUSH(ah);

    /*
     * For big endian systems turn on swapping for descriptors
     */
    if (AR_SREV_9100(ah)) {
        u32 mask;
        mask = REG_READ(ah, AR_CFG);
        if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
            ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
                mask);
        } else {
            mask =
                INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
            REG_WRITE(ah, AR_CFG, mask);
            ath_dbg(common, RESET, "Setting CFG 0x%x\n",
                REG_READ(ah, AR_CFG));
        }
    } else {
        if (common->bus_ops->ath_bus_type == ATH_USB) {
            /* Configure AR9271 target WLAN */
            if (AR_SREV_9271(ah))
                REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
            else
                REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
        }
#ifdef __BIG_ENDIAN
        else if (AR_SREV_9330(ah) || AR_SREV_9340(ah))
            REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
        else
            REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
#endif
    }

    if (ath9k_hw_btcoex_is_enabled(ah))
        ath9k_hw_btcoex_enable(ah);

    if (mci)
        ar9003_mci_check_bt(ah);

    if (AR_SREV_9300_20_OR_LATER(ah)) {
        ar9003_hw_bb_watchdog_config(ah);
        ar9003_hw_disable_phy_restart(ah);
    }

    ath9k_hw_apply_gpio_override(ah);

    return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);

/******************************/
/* Power Management (Chipset) */
/******************************/

/*
 * Notify Power Mgt
is disabled in self-generated frames. * If requested, force chip to sleep. */ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) { REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); if (setChip) { if (AR_SREV_9462(ah)) { REG_WRITE(ah, AR_TIMER_MODE, REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00); REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah, AR_NDP2_TIMER_MODE) & 0xFFFFFF00); REG_WRITE(ah, AR_SLP32_INC, REG_READ(ah, AR_SLP32_INC) & 0xFFF00000); /* xxx Required for WLAN only case ? */ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0); udelay(100); } /* * Clear the RTC force wake bit to allow the * mac to go to sleep. */ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); if (AR_SREV_9462(ah)) udelay(100); if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); /* Shutdown chip. Active low */ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) { REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN); udelay(2); } } /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ if (AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); } /* * Notify Power Management is enabled in self-generating * frames. If request, set power mode of chip to * auto/normal. Duration in units of 128us (1/8 TU). */ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) { u32 val; REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); if (setChip) { struct ath9k_hw_capabilities *pCap = &ah->caps; if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { /* Set WakeOnInterrupt bit; clear ForceWake bit */ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); } else { /* When chip goes into network sleep, it could be waken * up by MCI_INT interrupt caused by BT's HW messages * (LNA_xxx, CONT_xxx) which chould be in a very fast * rate (~100us). 
This will cause chip to leave and * re-enter network sleep mode frequently, which in * consequence will have WLAN MCI HW to generate lots of * SYS_WAKING and SYS_SLEEPING messages which will make * BT CPU to busy to process. */ if (AR_SREV_9462(ah)) { val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) & ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK; REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val); } /* * Clear the RTC force wake bit to allow the * mac to go to sleep. */ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); if (AR_SREV_9462(ah)) udelay(30); } } /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */ if (AR_SREV_9300_20_OR_LATER(ah)) REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); } static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip) { u32 val; int i; /* Set Bits 14 and 17 of AR_WA before powering on the chip. */ if (AR_SREV_9300_20_OR_LATER(ah)) { REG_WRITE(ah, AR_WA, ah->WARegVal); udelay(10); } if (setChip) { if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) { if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { return false; } if (!AR_SREV_9300_20_OR_LATER(ah)) ath9k_hw_init_pll(ah, NULL); } if (AR_SREV_9100(ah)) REG_SET_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN); REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); udelay(50); for (i = POWER_UP_TIME / 50; i > 0; i--) { val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; if (val == AR_RTC_STATUS_ON) break; udelay(50); REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN); } if (i == 0) { ath_err(ath9k_hw_common(ah), "Failed to wakeup in %uus\n", POWER_UP_TIME / 20); return false; } } REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); return true; } bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) { struct ath_common *common = ath9k_hw_common(ah); int status = true, setChip = true; static const char *modes[] = { "AWAKE", "FULL-SLEEP", "NETWORK SLEEP", "UNDEFINED" }; if (ah->power_mode == mode) return status; 
ath_dbg(common, RESET, "%s -> %s\n", modes[ah->power_mode], modes[mode]); switch (mode) { case ATH9K_PM_AWAKE: status = ath9k_hw_set_power_awake(ah, setChip); if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); break; case ATH9K_PM_FULL_SLEEP: if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) ar9003_mci_set_full_sleep(ah); ath9k_set_power_sleep(ah, setChip); ah->chip_fullsleep = true; break; case ATH9K_PM_NETWORK_SLEEP: if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2); ath9k_set_power_network_sleep(ah, setChip); break; default: ath_err(common, "Unknown power mode %u\n", mode); return false; } ah->power_mode = mode; /* * XXX: If this warning never comes up after a while then * simply keep the ATH_DBG_WARN_ON_ONCE() but make * ath9k_hw_setpower() return type void. */ if (!(ah->ah_flags & AH_UNPLUGGED)) ATH_DBG_WARN_ON_ONCE(!status); return status; } EXPORT_SYMBOL(ath9k_hw_setpower); /*******************/ /* Beacon Handling */ /*******************/ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) { int flags = 0; ENABLE_REGWRITE_BUFFER(ah); switch (ah->opmode) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: REG_SET_BIT(ah, AR_TXCFG, AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon + TU_TO_USEC(ah->atim_window ? 
ah->atim_window : 1)); flags |= AR_NDP_TIMER_EN; case NL80211_IFTYPE_AP: REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon); REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon - TU_TO_USEC(ah->config.dma_beacon_response_time)); REG_WRITE(ah, AR_NEXT_SWBA, next_beacon - TU_TO_USEC(ah->config.sw_beacon_response_time)); flags |= AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; break; default: ath_dbg(ath9k_hw_common(ah), BEACON, "%s: unsupported opmode: %d\n", __func__, ah->opmode); return; break; } REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period); REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period); REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period); REG_WRITE(ah, AR_NDP_PERIOD, beacon_period); REGWRITE_BUFFER_FLUSH(ah); REG_SET_BIT(ah, AR_TIMER_MODE, flags); } EXPORT_SYMBOL(ath9k_hw_beaconinit); void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, const struct ath9k_beacon_state *bs) { u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(bs->bs_intval)); REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(bs->bs_intval)); REGWRITE_BUFFER_FLUSH(ah); REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold); beaconintval = bs->bs_intval; if (bs->bs_sleepduration > beaconintval) beaconintval = bs->bs_sleepduration; dtimperiod = bs->bs_dtimperiod; if (bs->bs_sleepduration > dtimperiod) dtimperiod = bs->bs_sleepduration; if (beaconintval == dtimperiod) nextTbtt = bs->bs_nextdtim; else nextTbtt = bs->bs_nexttbtt; ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim); ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt); ath_dbg(common, BEACON, "beacon period %d\n", beaconintval); ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_NEXT_DTIM, TU_TO_USEC(bs->bs_nextdtim - 
					       SLEEP_SLOP));
	REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));

	REG_WRITE(ah, AR_SLEEP1,
		  SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
		  | AR_SLEEP1_ASSUME_DTIM);

	if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
		beacontimeout = (BEACON_TIMEOUT_VAL << 3);
	else
		beacontimeout = MIN_BEACON_TIMEOUT_VAL;

	REG_WRITE(ah, AR_SLEEP2,
		  SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));

	REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
	REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));

	REGWRITE_BUFFER_FLUSH(ah);

	REG_SET_BIT(ah, AR_TIMER_MODE,
		    AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
		    AR_DTIM_TIMER_EN);

	/* TSF Out of Range Threshold */
	REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
}
EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);

/*******************/
/* HW Capabilities */
/*******************/

/* Restrict the EEPROM chainmask to the chains the chip actually has;
 * if the EEPROM value is empty, fall back to the full chip chainmask. */
static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
{
	eeprom_chainmask &= chip_chainmask;
	if (eeprom_chainmask)
		return eeprom_chainmask;
	else
		return chip_chainmask;
}

/**
 * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
 * @ah: the atheros hardware data structure
 *
 * We enable DFS support upstream on chipsets which have passed a series
 * of tests. The testing requirements are going to be documented. Desired
 * test requirements are documented at:
 *
 * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
 *
 * Once a new chipset gets properly tested an individual commit can be used
 * to document the testing for DFS for that chipset.
 */
static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
{
	switch (ah->hw_version.macVersion) {
	/* AR9580 will likely be our first target to get testing on */
	case AR_SREV_VERSION_9580:
	default:
		return false;
	}
}

/*
 * Probe chip revision, EEPROM and OTP to populate ah->caps (bands,
 * chainmasks, GPIO count, HT/EDMA/MCI capabilities, ...). Returns 0 on
 * success or -EINVAL when the EEPROM marks no supported band.
 */
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	struct ath_common *common = ath9k_hw_common(ah);
	unsigned int chip_chainmask;

	u16 eeval;
	u8 ant_div_ctl1, tx_chainmask, rx_chainmask;

	eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
	regulatory->current_rd = eeval;

	if (ah->opmode != NL80211_IFTYPE_AP &&
	    ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
		/* Remap a few legacy regdomain codes for this subvendor */
		if (regulatory->current_rd == 0x64 ||
		    regulatory->current_rd == 0x65)
			regulatory->current_rd += 5;
		else if (regulatory->current_rd == 0x41)
			regulatory->current_rd = 0x43;
		ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
			regulatory->current_rd);
	}

	eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
	if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
		ath_err(common,
			"no band has been marked as supported in EEPROM\n");
		return -EINVAL;
	}

	if (eeval & AR5416_OPFLAGS_11A)
		pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
	if (eeval & AR5416_OPFLAGS_11G)
		pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;

	/* Per-family number of chains supported by the silicon */
	if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
		chip_chainmask = 1;
	else if (AR_SREV_9462(ah))
		chip_chainmask = 3;
	else if (!AR_SREV_9280_20_OR_LATER(ah))
		chip_chainmask = 7;
	else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
		chip_chainmask = 3;
	else
		chip_chainmask = 7;

	pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
	/*
	 * For AR9271 we will temporarilly uses the rx chainmax as read from
	 * the EEPROM.
	 */
	if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
	    !(eeval & AR5416_OPFLAGS_11A) &&
	    !(AR_SREV_9271(ah)))
		/* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
		pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ?
0x5 : 0x7;
	else if (AR_SREV_9100(ah))
		pCap->rx_chainmask = 0x7;
	else
		/* Use rx_chainmask from EEPROM. */
		pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);

	/* Sanitize both chainmasks against what the chip supports */
	pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
	pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);
	ah->txchainmask = pCap->tx_chainmask;
	ah->rxchainmask = pCap->rx_chainmask;

	ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;

	/* enable key search for every frame in an aggregate */
	if (AR_SREV_9300_20_OR_LATER(ah))
		ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;

	common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;

	if (ah->hw_version.devid != AR2427_DEVID_PCIE)
		pCap->hw_caps |= ATH9K_HW_CAP_HT;
	else
		pCap->hw_caps &= ~ATH9K_HW_CAP_HT;

	/* GPIO pin count varies per chip family */
	if (AR_SREV_9271(ah))
		pCap->num_gpio_pins = AR9271_NUM_GPIO;
	else if (AR_DEVID_7010(ah))
		pCap->num_gpio_pins = AR7010_NUM_GPIO;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		pCap->num_gpio_pins = AR9300_NUM_GPIO;
	else if (AR_SREV_9287_11_OR_LATER(ah))
		pCap->num_gpio_pins = AR9287_NUM_GPIO;
	else if (AR_SREV_9285_12_OR_LATER(ah))
		pCap->num_gpio_pins = AR9285_NUM_GPIO;
	else if (AR_SREV_9280_20_OR_LATER(ah))
		pCap->num_gpio_pins = AR928X_NUM_GPIO;
	else
		pCap->num_gpio_pins = AR_NUM_GPIO;

	if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
		pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
	else
		pCap->rts_aggr_limit = (8 * 1024);

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
	ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
	if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
		ah->rfkill_gpio =
			MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL);
		ah->rfkill_polarity =
			MS(ah->rfsilent, EEP_RFSILENT_POLARITY);

		pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
	}
#endif
	if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
	else
		pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;

	if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
		pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
	else
		pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		/* AR93xx+: enhanced DMA descriptors and fast clock support */
		pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
		if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
			pCap->hw_caps |= ATH9K_HW_CAP_LDPC;

		pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
		pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
		pCap->rx_status_len = sizeof(struct ar9003_rxs);
		pCap->tx_desc_len = sizeof(struct ar9003_txc);
		pCap->txs_len = sizeof(struct ar9003_txs);
		if (!ah->config.paprd_disable &&
		    ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
			pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
	} else {
		pCap->tx_desc_len = sizeof(struct ath_desc);
		if (AR_SREV_9280_20(ah))
			pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;

	if (AR_SREV_9300_20_OR_LATER(ah))
		ah->ent_mode = REG_READ(ah, AR_ENT_OTP);

	if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;

	if (AR_SREV_9285(ah))
		if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
			ant_div_ctl1 =
				ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
			if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
				pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
		}

	if (AR_SREV_9300_20_OR_LATER(ah)) {
		if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
			pCap->hw_caps |= ATH9K_HW_CAP_APM;
	}

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
		ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
		/*
		 * enable the diversity-combining algorithm only when
		 * both enable_lna_div and enable_fast_div are set
		 *		Table for Diversity
		 * ant_div_alt_lnaconf		bit 0-1
		 * ant_div_main_lnaconf		bit 2-3
		 * ant_div_alt_gaintb		bit 4
		 * ant_div_main_gaintb		bit 5
		 * enable_ant_div_lnadiv	bit 6
		 * enable_ant_fast_div		bit 7
		 */
		if ((ant_div_ctl1 >> 0x6) == 0x3)
			pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
	}

	if (AR_SREV_9485_10(ah)) {
		pCap->pcie_lcr_extsync_en = true;
		pCap->pcie_lcr_offset = 0x80;
	}

	if (ath9k_hw_dfs_tested(ah))
		pCap->hw_caps |= ATH9K_HW_CAP_DFS;

	/* Count the chains present in each sanitized mask */
	tx_chainmask = pCap->tx_chainmask;
	rx_chainmask = pCap->rx_chainmask;
	while (tx_chainmask ||
rx_chainmask) { if (tx_chainmask & BIT(0)) pCap->max_txchains++; if (rx_chainmask & BIT(0)) pCap->max_rxchains++; tx_chainmask >>= 1; rx_chainmask >>= 1; } if (AR_SREV_9300_20_OR_LATER(ah)) { ah->enabled_cals |= TX_IQ_CAL; if (AR_SREV_9485_OR_LATER(ah)) ah->enabled_cals |= TX_IQ_ON_AGC_CAL; } if (AR_SREV_9462(ah)) { if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) pCap->hw_caps |= ATH9K_HW_CAP_MCI; if (AR_SREV_9462_20(ah)) pCap->hw_caps |= ATH9K_HW_CAP_RTT; } return 0; } /****************************/ /* GPIO / RFKILL / Antennae */ /****************************/ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type) { int addr; u32 gpio_shift, tmp; if (gpio > 11) addr = AR_GPIO_OUTPUT_MUX3; else if (gpio > 5) addr = AR_GPIO_OUTPUT_MUX2; else addr = AR_GPIO_OUTPUT_MUX1; gpio_shift = (gpio % 6) * 5; if (AR_SREV_9280_20_OR_LATER(ah) || (addr != AR_GPIO_OUTPUT_MUX1)) { REG_RMW(ah, addr, (type << gpio_shift), (0x1f << gpio_shift)); } else { tmp = REG_READ(ah, addr); tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0); tmp &= ~(0x1f << gpio_shift); tmp |= (type << gpio_shift); REG_WRITE(ah, addr, tmp); } } void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio) { u32 gpio_shift; BUG_ON(gpio >= ah->caps.num_gpio_pins); if (AR_DEVID_7010(ah)) { gpio_shift = gpio; REG_RMW(ah, AR7010_GPIO_OE, (AR7010_GPIO_OE_AS_INPUT << gpio_shift), (AR7010_GPIO_OE_MASK << gpio_shift)); return; } gpio_shift = gpio << 1; REG_RMW(ah, AR_GPIO_OE_OUT, (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), (AR_GPIO_OE_OUT_DRV << gpio_shift)); } EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input); u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) { #define MS_REG_READ(x, y) \ (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y))) if (gpio >= ah->caps.num_gpio_pins) return 0xffffffff; if (AR_DEVID_7010(ah)) { u32 val; val = REG_READ(ah, AR7010_GPIO_IN); return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; } else if (AR_SREV_9300_20_OR_LATER(ah)) return (MS(REG_READ(ah, AR_GPIO_IN), 
AR9300_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0; else if (AR_SREV_9271(ah)) return MS_REG_READ(AR9271, gpio) != 0; else if (AR_SREV_9287_11_OR_LATER(ah)) return MS_REG_READ(AR9287, gpio) != 0; else if (AR_SREV_9285_12_OR_LATER(ah)) return MS_REG_READ(AR9285, gpio) != 0; else if (AR_SREV_9280_20_OR_LATER(ah)) return MS_REG_READ(AR928X, gpio) != 0; else return MS_REG_READ(AR, gpio) != 0; } EXPORT_SYMBOL(ath9k_hw_gpio_get); void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, u32 ah_signal_type) { u32 gpio_shift; if (AR_DEVID_7010(ah)) { gpio_shift = gpio; REG_RMW(ah, AR7010_GPIO_OE, (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift), (AR7010_GPIO_OE_MASK << gpio_shift)); return; } ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); gpio_shift = 2 * gpio; REG_RMW(ah, AR_GPIO_OE_OUT, (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), (AR_GPIO_OE_OUT_DRV << gpio_shift)); } EXPORT_SYMBOL(ath9k_hw_cfg_output); void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) { if (AR_DEVID_7010(ah)) { val = val ? 0 : 1; REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio), AR_GPIO_BIT(gpio)); return; } if (AR_SREV_9271(ah)) val = ~val; REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), AR_GPIO_BIT(gpio)); } EXPORT_SYMBOL(ath9k_hw_set_gpio); void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) { REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); } EXPORT_SYMBOL(ath9k_hw_setantenna); /*********************/ /* General Operation */ /*********************/ u32 ath9k_hw_getrxfilter(struct ath_hw *ah) { u32 bits = REG_READ(ah, AR_RX_FILTER); u32 phybits = REG_READ(ah, AR_PHY_ERR); if (phybits & AR_PHY_ERR_RADAR) bits |= ATH9K_RX_FILTER_PHYRADAR; if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING)) bits |= ATH9K_RX_FILTER_PHYERR; return bits; } EXPORT_SYMBOL(ath9k_hw_getrxfilter); void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits) { u32 phybits; ENABLE_REGWRITE_BUFFER(ah); if (AR_SREV_9462(ah)) bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER; REG_WRITE(ah, AR_RX_FILTER, bits); phybits = 0; if (bits & 
ATH9K_RX_FILTER_PHYRADAR) phybits |= AR_PHY_ERR_RADAR; if (bits & ATH9K_RX_FILTER_PHYERR) phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING; REG_WRITE(ah, AR_PHY_ERR, phybits); if (phybits) REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA); else REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA); REGWRITE_BUFFER_FLUSH(ah); } EXPORT_SYMBOL(ath9k_hw_setrxfilter); bool ath9k_hw_phy_disable(struct ath_hw *ah) { if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) return false; ath9k_hw_init_pll(ah, NULL); ah->htc_reset_init = true; return true; } EXPORT_SYMBOL(ath9k_hw_phy_disable); bool ath9k_hw_disable(struct ath_hw *ah) { if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) return false; if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD)) return false; ath9k_hw_init_pll(ah, NULL); return true; } EXPORT_SYMBOL(ath9k_hw_disable); static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan) { enum eeprom_param gain_param; if (IS_CHAN_2GHZ(chan)) gain_param = EEP_ANTENNA_GAIN_2G; else gain_param = EEP_ANTENNA_GAIN_5G; return ah->eep_ops->get_eeprom(ah, gain_param); } void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan, bool test) { struct ath_regulatory *reg = ath9k_hw_regulatory(ah); struct ieee80211_channel *channel; int chan_pwr, new_pwr, max_gain; int ant_gain, ant_reduction = 0; if (!chan) return; channel = chan->chan; chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER); new_pwr = min_t(int, chan_pwr, reg->power_limit); max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2; ant_gain = get_antenna_gain(ah, chan); if (ant_gain > max_gain) ant_reduction = ant_gain - max_gain; ah->eep_ops->set_txpower(ah, chan, ath9k_regd_get_ctl(reg, chan), ant_reduction, new_pwr, test); } void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) { struct ath_regulatory *reg = ath9k_hw_regulatory(ah); struct ath9k_channel *chan = ah->curchan; struct ieee80211_channel *channel = chan->chan; reg->power_limit = min_t(u32, limit, 
MAX_RATE_POWER); if (test) channel->max_power = MAX_RATE_POWER / 2; ath9k_hw_apply_txpower(ah, chan, test); if (test) channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2); } EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); void ath9k_hw_setopmode(struct ath_hw *ah) { ath9k_hw_set_operating_mode(ah, ah->opmode); } EXPORT_SYMBOL(ath9k_hw_setopmode); void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1) { REG_WRITE(ah, AR_MCAST_FIL0, filter0); REG_WRITE(ah, AR_MCAST_FIL1, filter1); } EXPORT_SYMBOL(ath9k_hw_setmcastfilter); void ath9k_hw_write_associd(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid)); REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) | ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S)); } EXPORT_SYMBOL(ath9k_hw_write_associd); #define ATH9K_MAX_TSF_READ 10 u64 ath9k_hw_gettsf64(struct ath_hw *ah) { u32 tsf_lower, tsf_upper1, tsf_upper2; int i; tsf_upper1 = REG_READ(ah, AR_TSF_U32); for (i = 0; i < ATH9K_MAX_TSF_READ; i++) { tsf_lower = REG_READ(ah, AR_TSF_L32); tsf_upper2 = REG_READ(ah, AR_TSF_U32); if (tsf_upper2 == tsf_upper1) break; tsf_upper1 = tsf_upper2; } WARN_ON( i == ATH9K_MAX_TSF_READ ); return (((u64)tsf_upper1 << 32) | tsf_lower); } EXPORT_SYMBOL(ath9k_hw_gettsf64); void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64) { REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); } EXPORT_SYMBOL(ath9k_hw_settsf64); void ath9k_hw_reset_tsf(struct ath_hw *ah) { if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, AH_TSF_WRITE_TIMEOUT)) ath_dbg(ath9k_hw_common(ah), RESET, "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); } EXPORT_SYMBOL(ath9k_hw_reset_tsf); void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting) { if (setting) ah->misc_mode |= AR_PCU_TX_ADD_TSF; else ah->misc_mode &= ~AR_PCU_TX_ADD_TSF; } 
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust); void ath9k_hw_set11nmac2040(struct ath_hw *ah) { struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; u32 macmode; if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca) macmode = AR_2040_JOINED_RX_CLEAR; else macmode = 0; REG_WRITE(ah, AR_2040_MODE, macmode); } /* HW Generic timers configuration */ static const struct ath_gen_timer_configuration gen_tmr_configuration[] = { {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, {AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001}, {AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4, AR_NDP2_TIMER_MODE, 0x0002}, {AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4, AR_NDP2_TIMER_MODE, 0x0004}, {AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4, AR_NDP2_TIMER_MODE, 0x0008}, {AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4, AR_NDP2_TIMER_MODE, 0x0010}, {AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4, AR_NDP2_TIMER_MODE, 0x0020}, {AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4, AR_NDP2_TIMER_MODE, 0x0040}, {AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4, AR_NDP2_TIMER_MODE, 0x0080} }; /* HW generic timer primitives */ /* compute and clear index of rightmost 1 */ static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask) { u32 b; b = *mask; b &= (0-b); *mask &= ~b; b *= debruijn32; b >>= 27; return timer_table->gen_timer_index[b]; } u32 ath9k_hw_gettsf32(struct ath_hw *ah) { return REG_READ(ah, AR_TSF_L32); } EXPORT_SYMBOL(ath9k_hw_gettsf32); struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, void (*trigger)(void *), void 
(*overflow)(void *), void *arg, u8 timer_index) { struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; struct ath_gen_timer *timer; timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); if (timer == NULL) { ath_err(ath9k_hw_common(ah), "Failed to allocate memory for hw timer[%d]\n", timer_index); return NULL; } /* allocate a hardware generic timer slot */ timer_table->timers[timer_index] = timer; timer->index = timer_index; timer->trigger = trigger; timer->overflow = overflow; timer->arg = arg; return timer; } EXPORT_SYMBOL(ath_gen_timer_alloc); void ath9k_hw_gen_timer_start(struct ath_hw *ah, struct ath_gen_timer *timer, u32 trig_timeout, u32 timer_period) { struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; u32 tsf, timer_next; BUG_ON(!timer_period); set_bit(timer->index, &timer_table->timer_mask.timer_bits); tsf = ath9k_hw_gettsf32(ah); timer_next = tsf + trig_timeout; ath_dbg(ath9k_hw_common(ah), HWTIMER, "current tsf %x period %x timer_next %x\n", tsf, timer_period, timer_next); /* * Program generic timer registers */ REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr, timer_next); REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr, timer_period); REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, gen_tmr_configuration[timer->index].mode_mask); if (AR_SREV_9462(ah)) { /* * Starting from AR9462, each generic timer can select which tsf * to use. But we still follow the old rule, 0 - 7 use tsf and * 8 - 15 use tsf2. 
*/ if ((timer->index < AR_GEN_TIMER_BANK_1_LEN)) REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL, (1 << timer->index)); else REG_SET_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL, (1 << timer->index)); } /* Enable both trigger and thresh interrupt masks */ REG_SET_BIT(ah, AR_IMR_S5, (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); } EXPORT_SYMBOL(ath9k_hw_gen_timer_start); void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) { struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; if ((timer->index < AR_FIRST_NDP_TIMER) || (timer->index >= ATH_MAX_GEN_TIMER)) { return; } /* Clear generic timer enable bits. */ REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, gen_tmr_configuration[timer->index].mode_mask); /* Disable both trigger and thresh interrupt masks */ REG_CLR_BIT(ah, AR_IMR_S5, (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); clear_bit(timer->index, &timer_table->timer_mask.timer_bits); } EXPORT_SYMBOL(ath9k_hw_gen_timer_stop); void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer) { struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; /* free the hardware generic timer slot */ timer_table->timers[timer->index] = NULL; kfree(timer); } EXPORT_SYMBOL(ath_gen_timer_free); /* * Generic Timer Interrupts handling */ void ath_gen_timer_isr(struct ath_hw *ah) { struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; struct ath_gen_timer *timer; struct ath_common *common = ath9k_hw_common(ah); u32 trigger_mask, thresh_mask, index; /* get hardware generic timer interrupt status */ trigger_mask = ah->intr_gen_timer_trigger; thresh_mask = ah->intr_gen_timer_thresh; trigger_mask &= timer_table->timer_mask.val; thresh_mask &= timer_table->timer_mask.val; trigger_mask &= ~thresh_mask; while (thresh_mask) { index = rightmost_index(timer_table, &thresh_mask); timer = 
timer_table->timers[index]; BUG_ON(!timer); ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n", index); timer->overflow(timer->arg); } while (trigger_mask) { index = rightmost_index(timer_table, &trigger_mask); timer = timer_table->timers[index]; BUG_ON(!timer); ath_dbg(common, HWTIMER, "Gen timer[%d] trigger\n", index); timer->trigger(timer->arg); } } EXPORT_SYMBOL(ath_gen_timer_isr); /********/ /* HTC */ /********/ static struct { u32 version; const char * name; } ath_mac_bb_names[] = { /* Devices with external radios */ { AR_SREV_VERSION_5416_PCI, "5416" }, { AR_SREV_VERSION_5416_PCIE, "5418" }, { AR_SREV_VERSION_9100, "9100" }, { AR_SREV_VERSION_9160, "9160" }, /* Single-chip solutions */ { AR_SREV_VERSION_9280, "9280" }, { AR_SREV_VERSION_9285, "9285" }, { AR_SREV_VERSION_9287, "9287" }, { AR_SREV_VERSION_9271, "9271" }, { AR_SREV_VERSION_9300, "9300" }, { AR_SREV_VERSION_9330, "9330" }, { AR_SREV_VERSION_9340, "9340" }, { AR_SREV_VERSION_9485, "9485" }, { AR_SREV_VERSION_9462, "9462" }, }; /* For devices with external radios */ static struct { u16 version; const char * name; } ath_rf_names[] = { { 0, "5133" }, { AR_RAD5133_SREV_MAJOR, "5133" }, { AR_RAD5122_SREV_MAJOR, "5122" }, { AR_RAD2133_SREV_MAJOR, "2133" }, { AR_RAD2122_SREV_MAJOR, "2122" } }; /* * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown. */ static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version) { int i; for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) { if (ath_mac_bb_names[i].version == mac_bb_version) { return ath_mac_bb_names[i].name; } } return "????"; } /* * Return the RF name. "????" is returned if the RF is unknown. * Used for devices with external radios. 
*/ static const char *ath9k_hw_rf_name(u16 rf_version) { int i; for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) { if (ath_rf_names[i].version == rf_version) { return ath_rf_names[i].name; } } return "????"; } void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len) { int used; /* chipsets >= AR9280 are single-chip */ if (AR_SREV_9280_20_OR_LATER(ah)) { used = snprintf(hw_name, len, "Atheros AR%s Rev:%x", ath9k_hw_mac_bb_name(ah->hw_version.macVersion), ah->hw_version.macRev); } else { used = snprintf(hw_name, len, "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x", ath9k_hw_mac_bb_name(ah->hw_version.macVersion), ah->hw_version.macRev, ath9k_hw_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)), ah->hw_version.phyRev); } hw_name[used] = '\0'; } EXPORT_SYMBOL(ath9k_hw_name);
gpl-2.0
Starship-Android/starship_kernel_moto_shamu
drivers/spi/spi-imx.c
2104
24890
/* * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright (C) 2008 Juergen Beisert * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02110-1301, USA. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/types.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_data/spi-imx.h> #define DRIVER_NAME "spi_imx" #define MXC_CSPIRXDATA 0x00 #define MXC_CSPITXDATA 0x04 #define MXC_CSPICTRL 0x08 #define MXC_CSPIINT 0x0c #define MXC_RESET 0x1c /* generic defines to abstract from the different register layouts */ #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ struct spi_imx_config { unsigned int speed_hz; unsigned int bpw; unsigned int mode; u8 cs; }; enum spi_imx_devtype { IMX1_CSPI, IMX21_CSPI, IMX27_CSPI, IMX31_CSPI, IMX35_CSPI, /* CSPI on all i.mx except above */ IMX51_ECSPI, /* ECSPI on i.mx51 and 
later */ }; struct spi_imx_data; struct spi_imx_devtype_data { void (*intctrl)(struct spi_imx_data *, int); int (*config)(struct spi_imx_data *, struct spi_imx_config *); void (*trigger)(struct spi_imx_data *); int (*rx_available)(struct spi_imx_data *); void (*reset)(struct spi_imx_data *); enum spi_imx_devtype devtype; }; struct spi_imx_data { struct spi_bitbang bitbang; struct completion xfer_done; void __iomem *base; int irq; struct clk *clk_per; struct clk *clk_ipg; unsigned long spi_clk; unsigned int count; void (*tx)(struct spi_imx_data *); void (*rx)(struct spi_imx_data *); void *rx_buf; const void *tx_buf; unsigned int txfifo; /* number of words pushed in tx FIFO */ const struct spi_imx_devtype_data *devtype_data; int chipselect[0]; }; static inline int is_imx27_cspi(struct spi_imx_data *d) { return d->devtype_data->devtype == IMX27_CSPI; } static inline int is_imx35_cspi(struct spi_imx_data *d) { return d->devtype_data->devtype == IMX35_CSPI; } static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d) { return (d->devtype_data->devtype == IMX51_ECSPI) ? 
64 : 8; } #define MXC_SPI_BUF_RX(type) \ static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \ { \ unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \ \ if (spi_imx->rx_buf) { \ *(type *)spi_imx->rx_buf = val; \ spi_imx->rx_buf += sizeof(type); \ } \ } #define MXC_SPI_BUF_TX(type) \ static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \ { \ type val = 0; \ \ if (spi_imx->tx_buf) { \ val = *(type *)spi_imx->tx_buf; \ spi_imx->tx_buf += sizeof(type); \ } \ \ spi_imx->count -= sizeof(type); \ \ writel(val, spi_imx->base + MXC_CSPITXDATA); \ } MXC_SPI_BUF_RX(u8) MXC_SPI_BUF_TX(u8) MXC_SPI_BUF_RX(u16) MXC_SPI_BUF_TX(u16) MXC_SPI_BUF_RX(u32) MXC_SPI_BUF_TX(u32) /* First entry is reserved, second entry is valid only if SDHC_SPIEN is set * (which is currently not the case in this driver) */ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024}; /* MX21, MX27 */ static unsigned int spi_imx_clkdiv_1(unsigned int fin, unsigned int fspi, unsigned int max) { int i; for (i = 2; i < max; i++) if (fspi * mxc_clkdivs[i] >= fin) return i; return max; } /* MX1, MX31, MX35, MX51 CSPI */ static unsigned int spi_imx_clkdiv_2(unsigned int fin, unsigned int fspi) { int i, div = 4; for (i = 0; i < 7; i++) { if (fspi * div >= fin) return i; div <<= 1; } return 7; } #define MX51_ECSPI_CTRL 0x08 #define MX51_ECSPI_CTRL_ENABLE (1 << 0) #define MX51_ECSPI_CTRL_XCH (1 << 2) #define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4) #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8 #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12 #define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18) #define MX51_ECSPI_CTRL_BL_OFFSET 20 #define MX51_ECSPI_CONFIG 0x0c #define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) #define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) #define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) #define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) #define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20)) #define MX51_ECSPI_INT 0x10 
#define MX51_ECSPI_INT_TEEN (1 << 0) #define MX51_ECSPI_INT_RREN (1 << 3) #define MX51_ECSPI_STAT 0x18 #define MX51_ECSPI_STAT_RR (1 << 3) /* MX51 eCSPI */ static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi) { /* * there are two 4-bit dividers, the pre-divider divides by * $pre, the post-divider by 2^$post */ unsigned int pre, post; if (unlikely(fspi > fin)) return 0; post = fls(fin) - fls(fspi); if (fin > fspi << post) post++; /* now we have: (fin <= fspi << post) with post being minimal */ post = max(4U, post) - 4; if (unlikely(post > 0xf)) { pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", __func__, fspi, fin); return 0xff; } pre = DIV_ROUND_UP(fin, fspi << post) - 1; pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", __func__, fin, fspi, post, pre); return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) | (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET); } static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned val = 0; if (enable & MXC_INT_TE) val |= MX51_ECSPI_INT_TEEN; if (enable & MXC_INT_RR) val |= MX51_ECSPI_INT_RREN; writel(val, spi_imx->base + MX51_ECSPI_INT); } static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx) { u32 reg; reg = readl(spi_imx->base + MX51_ECSPI_CTRL); reg |= MX51_ECSPI_CTRL_XCH; writel(reg, spi_imx->base + MX51_ECSPI_CTRL); } static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0; /* * The hardware seems to have a race condition when changing modes. The * current assumption is that the selection of the channel arrives * earlier in the hardware than the mode bits when they are written at * the same time. * So set master mode for all channels as we do not support slave mode. 
*/ ctrl |= MX51_ECSPI_CTRL_MODE_MASK; /* set clock speed */ ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz); /* set chip select to use */ ctrl |= MX51_ECSPI_CTRL_CS(config->cs); ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET; cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs); if (config->mode & SPI_CPHA) cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs); if (config->mode & SPI_CPOL) { cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs); cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs); } if (config->mode & SPI_CS_HIGH) cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs); writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG); return 0; } static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR; } static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx) { /* drain receive buffer */ while (mx51_ecspi_rx_available(spi_imx)) readl(spi_imx->base + MXC_CSPIRXDATA); } #define MX31_INTREG_TEEN (1 << 0) #define MX31_INTREG_RREN (1 << 3) #define MX31_CSPICTRL_ENABLE (1 << 0) #define MX31_CSPICTRL_MASTER (1 << 1) #define MX31_CSPICTRL_XCH (1 << 2) #define MX31_CSPICTRL_POL (1 << 4) #define MX31_CSPICTRL_PHA (1 << 5) #define MX31_CSPICTRL_SSCTL (1 << 6) #define MX31_CSPICTRL_SSPOL (1 << 7) #define MX31_CSPICTRL_BC_SHIFT 8 #define MX35_CSPICTRL_BL_SHIFT 20 #define MX31_CSPICTRL_CS_SHIFT 24 #define MX35_CSPICTRL_CS_SHIFT 12 #define MX31_CSPICTRL_DR_SHIFT 16 #define MX31_CSPISTATUS 0x14 #define MX31_STATUS_RR (1 << 3) /* These functions also work for the i.MX35, but be aware that * the i.MX35 has a slightly different register layout for bits * we do not use here. 
*/ static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; if (enable & MXC_INT_TE) val |= MX31_INTREG_TEEN; if (enable & MXC_INT_RR) val |= MX31_INTREG_RREN; writel(val, spi_imx->base + MXC_CSPIINT); } static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; reg = readl(spi_imx->base + MXC_CSPICTRL); reg |= MX31_CSPICTRL_XCH; writel(reg, spi_imx->base + MXC_CSPICTRL); } static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; int cs = spi_imx->chipselect[config->cs]; reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << MX31_CSPICTRL_DR_SHIFT; if (is_imx35_cspi(spi_imx)) { reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; reg |= MX31_CSPICTRL_SSCTL; } else { reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; } if (config->mode & SPI_CPHA) reg |= MX31_CSPICTRL_PHA; if (config->mode & SPI_CPOL) reg |= MX31_CSPICTRL_POL; if (config->mode & SPI_CS_HIGH) reg |= MX31_CSPICTRL_SSPOL; if (cs < 0) reg |= (cs + 32) << (is_imx35_cspi(spi_imx) ? 
MX35_CSPICTRL_CS_SHIFT : MX31_CSPICTRL_CS_SHIFT); writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; } static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx) { /* drain receive buffer */ while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR) readl(spi_imx->base + MXC_CSPIRXDATA); } #define MX21_INTREG_RR (1 << 4) #define MX21_INTREG_TEEN (1 << 9) #define MX21_INTREG_RREN (1 << 13) #define MX21_CSPICTRL_POL (1 << 5) #define MX21_CSPICTRL_PHA (1 << 6) #define MX21_CSPICTRL_SSPOL (1 << 8) #define MX21_CSPICTRL_XCH (1 << 9) #define MX21_CSPICTRL_ENABLE (1 << 10) #define MX21_CSPICTRL_MASTER (1 << 11) #define MX21_CSPICTRL_DR_SHIFT 14 #define MX21_CSPICTRL_CS_SHIFT 19 static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; if (enable & MXC_INT_TE) val |= MX21_INTREG_TEEN; if (enable & MXC_INT_RR) val |= MX21_INTREG_RREN; writel(val, spi_imx->base + MXC_CSPIINT); } static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; reg = readl(spi_imx->base + MXC_CSPICTRL); reg |= MX21_CSPICTRL_XCH; writel(reg, spi_imx->base + MXC_CSPICTRL); } static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER; int cs = spi_imx->chipselect[config->cs]; unsigned int max = is_imx27_cspi(spi_imx) ? 
16 : 18; reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) << MX21_CSPICTRL_DR_SHIFT; reg |= config->bpw - 1; if (config->mode & SPI_CPHA) reg |= MX21_CSPICTRL_PHA; if (config->mode & SPI_CPOL) reg |= MX21_CSPICTRL_POL; if (config->mode & SPI_CS_HIGH) reg |= MX21_CSPICTRL_SSPOL; if (cs < 0) reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT; writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR; } static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx) { writel(1, spi_imx->base + MXC_RESET); } #define MX1_INTREG_RR (1 << 3) #define MX1_INTREG_TEEN (1 << 8) #define MX1_INTREG_RREN (1 << 11) #define MX1_CSPICTRL_POL (1 << 4) #define MX1_CSPICTRL_PHA (1 << 5) #define MX1_CSPICTRL_XCH (1 << 8) #define MX1_CSPICTRL_ENABLE (1 << 9) #define MX1_CSPICTRL_MASTER (1 << 10) #define MX1_CSPICTRL_DR_SHIFT 13 static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; if (enable & MXC_INT_TE) val |= MX1_INTREG_TEEN; if (enable & MXC_INT_RR) val |= MX1_INTREG_RREN; writel(val, spi_imx->base + MXC_CSPIINT); } static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; reg = readl(spi_imx->base + MXC_CSPICTRL); reg |= MX1_CSPICTRL_XCH; writel(reg, spi_imx->base + MXC_CSPICTRL); } static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << MX1_CSPICTRL_DR_SHIFT; reg |= config->bpw - 1; if (config->mode & SPI_CPHA) reg |= MX1_CSPICTRL_PHA; if (config->mode & SPI_CPOL) reg |= MX1_CSPICTRL_POL; writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; } static void 
__maybe_unused mx1_reset(struct spi_imx_data *spi_imx) { writel(1, spi_imx->base + MXC_RESET); } static struct spi_imx_devtype_data imx1_cspi_devtype_data = { .intctrl = mx1_intctrl, .config = mx1_config, .trigger = mx1_trigger, .rx_available = mx1_rx_available, .reset = mx1_reset, .devtype = IMX1_CSPI, }; static struct spi_imx_devtype_data imx21_cspi_devtype_data = { .intctrl = mx21_intctrl, .config = mx21_config, .trigger = mx21_trigger, .rx_available = mx21_rx_available, .reset = mx21_reset, .devtype = IMX21_CSPI, }; static struct spi_imx_devtype_data imx27_cspi_devtype_data = { /* i.mx27 cspi shares the functions with i.mx21 one */ .intctrl = mx21_intctrl, .config = mx21_config, .trigger = mx21_trigger, .rx_available = mx21_rx_available, .reset = mx21_reset, .devtype = IMX27_CSPI, }; static struct spi_imx_devtype_data imx31_cspi_devtype_data = { .intctrl = mx31_intctrl, .config = mx31_config, .trigger = mx31_trigger, .rx_available = mx31_rx_available, .reset = mx31_reset, .devtype = IMX31_CSPI, }; static struct spi_imx_devtype_data imx35_cspi_devtype_data = { /* i.mx35 and later cspi shares the functions with i.mx31 one */ .intctrl = mx31_intctrl, .config = mx31_config, .trigger = mx31_trigger, .rx_available = mx31_rx_available, .reset = mx31_reset, .devtype = IMX35_CSPI, }; static struct spi_imx_devtype_data imx51_ecspi_devtype_data = { .intctrl = mx51_ecspi_intctrl, .config = mx51_ecspi_config, .trigger = mx51_ecspi_trigger, .rx_available = mx51_ecspi_rx_available, .reset = mx51_ecspi_reset, .devtype = IMX51_ECSPI, }; static struct platform_device_id spi_imx_devtype[] = { { .name = "imx1-cspi", .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data, }, { .name = "imx21-cspi", .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data, }, { .name = "imx27-cspi", .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data, }, { .name = "imx31-cspi", .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data, }, { .name = "imx35-cspi", .driver_data = (kernel_ulong_t) 
&imx35_cspi_devtype_data, }, { .name = "imx51-ecspi", .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data, }, { /* sentinel */ } }; static const struct of_device_id spi_imx_dt_ids[] = { { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, }, { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, }, { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, }, { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, }, { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, }, { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, }, { /* sentinel */ } }; static void spi_imx_chipselect(struct spi_device *spi, int is_active) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); int gpio = spi_imx->chipselect[spi->chip_select]; int active = is_active != BITBANG_CS_INACTIVE; int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH); if (!gpio_is_valid(gpio)) return; gpio_set_value(gpio, dev_is_lowactive ^ active); } static void spi_imx_push(struct spi_imx_data *spi_imx) { while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) { if (!spi_imx->count) break; spi_imx->tx(spi_imx); spi_imx->txfifo++; } spi_imx->devtype_data->trigger(spi_imx); } static irqreturn_t spi_imx_isr(int irq, void *dev_id) { struct spi_imx_data *spi_imx = dev_id; while (spi_imx->devtype_data->rx_available(spi_imx)) { spi_imx->rx(spi_imx); spi_imx->txfifo--; } if (spi_imx->count) { spi_imx_push(spi_imx); return IRQ_HANDLED; } if (spi_imx->txfifo) { /* No data left to push, but still waiting for rx data, * enable receive data available interrupt. */ spi_imx->devtype_data->intctrl( spi_imx, MXC_INT_RR); return IRQ_HANDLED; } spi_imx->devtype_data->intctrl(spi_imx, 0); complete(&spi_imx->xfer_done); return IRQ_HANDLED; } static int spi_imx_setupxfer(struct spi_device *spi, struct spi_transfer *t) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); struct spi_imx_config config; config.bpw = t ? 
t->bits_per_word : spi->bits_per_word; config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; config.mode = spi->mode; config.cs = spi->chip_select; if (!config.speed_hz) config.speed_hz = spi->max_speed_hz; if (!config.bpw) config.bpw = spi->bits_per_word; /* Initialize the functions for transfer */ if (config.bpw <= 8) { spi_imx->rx = spi_imx_buf_rx_u8; spi_imx->tx = spi_imx_buf_tx_u8; } else if (config.bpw <= 16) { spi_imx->rx = spi_imx_buf_rx_u16; spi_imx->tx = spi_imx_buf_tx_u16; } else if (config.bpw <= 32) { spi_imx->rx = spi_imx_buf_rx_u32; spi_imx->tx = spi_imx_buf_tx_u32; } else BUG(); spi_imx->devtype_data->config(spi_imx, &config); return 0; } static int spi_imx_transfer(struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); spi_imx->tx_buf = transfer->tx_buf; spi_imx->rx_buf = transfer->rx_buf; spi_imx->count = transfer->len; spi_imx->txfifo = 0; init_completion(&spi_imx->xfer_done); spi_imx_push(spi_imx); spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE); wait_for_completion(&spi_imx->xfer_done); return transfer->len; } static int spi_imx_setup(struct spi_device *spi) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); int gpio = spi_imx->chipselect[spi->chip_select]; dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, spi->mode, spi->bits_per_word, spi->max_speed_hz); if (gpio_is_valid(gpio)) gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 
0 : 1); spi_imx_chipselect(spi, BITBANG_CS_INACTIVE); return 0; } static void spi_imx_cleanup(struct spi_device *spi) { } static int spi_imx_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_id = of_match_device(spi_imx_dt_ids, &pdev->dev); struct spi_imx_master *mxc_platform_info = dev_get_platdata(&pdev->dev); struct spi_master *master; struct spi_imx_data *spi_imx; struct resource *res; struct pinctrl *pinctrl; int i, ret, num_cs; if (!np && !mxc_platform_info) { dev_err(&pdev->dev, "can't get the platform data\n"); return -EINVAL; } ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs); if (ret < 0) { if (mxc_platform_info) num_cs = mxc_platform_info->num_chipselect; else return ret; } master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data) + sizeof(int) * num_cs); if (!master) return -ENOMEM; platform_set_drvdata(pdev, master); master->bus_num = pdev->id; master->num_chipselect = num_cs; spi_imx = spi_master_get_devdata(master); spi_imx->bitbang.master = spi_master_get(master); for (i = 0; i < master->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); if (!gpio_is_valid(cs_gpio) && mxc_platform_info) cs_gpio = mxc_platform_info->chipselect[i]; spi_imx->chipselect[i] = cs_gpio; if (!gpio_is_valid(cs_gpio)) continue; ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); if (ret) { dev_err(&pdev->dev, "can't get cs gpios\n"); goto out_gpio_free; } } spi_imx->bitbang.chipselect = spi_imx_chipselect; spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; spi_imx->bitbang.txrx_bufs = spi_imx_transfer; spi_imx->bitbang.master->setup = spi_imx_setup; spi_imx->bitbang.master->cleanup = spi_imx_cleanup; spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; init_completion(&spi_imx->xfer_done); spi_imx->devtype_data = of_id ? 
of_id->data : (struct spi_imx_devtype_data *) pdev->id_entry->driver_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "can't get platform resource\n"); ret = -ENOMEM; goto out_gpio_free; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { dev_err(&pdev->dev, "request_mem_region failed\n"); ret = -EBUSY; goto out_gpio_free; } spi_imx->base = ioremap(res->start, resource_size(res)); if (!spi_imx->base) { ret = -EINVAL; goto out_release_mem; } spi_imx->irq = platform_get_irq(pdev, 0); if (spi_imx->irq < 0) { ret = -EINVAL; goto out_iounmap; } ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx); if (ret) { dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); goto out_iounmap; } pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) { ret = PTR_ERR(pinctrl); goto out_free_irq; } spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(spi_imx->clk_ipg)) { ret = PTR_ERR(spi_imx->clk_ipg); goto out_free_irq; } spi_imx->clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(spi_imx->clk_per)) { ret = PTR_ERR(spi_imx->clk_per); goto out_free_irq; } clk_prepare_enable(spi_imx->clk_per); clk_prepare_enable(spi_imx->clk_ipg); spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); spi_imx->devtype_data->reset(spi_imx); spi_imx->devtype_data->intctrl(spi_imx, 0); master->dev.of_node = pdev->dev.of_node; ret = spi_bitbang_start(&spi_imx->bitbang); if (ret) { dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); goto out_clk_put; } dev_info(&pdev->dev, "probed\n"); return ret; out_clk_put: clk_disable_unprepare(spi_imx->clk_per); clk_disable_unprepare(spi_imx->clk_ipg); out_free_irq: free_irq(spi_imx->irq, spi_imx); out_iounmap: iounmap(spi_imx->base); out_release_mem: release_mem_region(res->start, resource_size(res)); out_gpio_free: while (--i >= 0) { if (gpio_is_valid(spi_imx->chipselect[i])) gpio_free(spi_imx->chipselect[i]); } spi_master_put(master); 
kfree(master); platform_set_drvdata(pdev, NULL); return ret; } static int spi_imx_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct spi_imx_data *spi_imx = spi_master_get_devdata(master); int i; spi_bitbang_stop(&spi_imx->bitbang); writel(0, spi_imx->base + MXC_CSPICTRL); clk_disable_unprepare(spi_imx->clk_per); clk_disable_unprepare(spi_imx->clk_ipg); free_irq(spi_imx->irq, spi_imx); iounmap(spi_imx->base); for (i = 0; i < master->num_chipselect; i++) if (gpio_is_valid(spi_imx->chipselect[i])) gpio_free(spi_imx->chipselect[i]); spi_master_put(master); release_mem_region(res->start, resource_size(res)); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver spi_imx_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = spi_imx_dt_ids, }, .id_table = spi_imx_devtype, .probe = spi_imx_probe, .remove = spi_imx_remove, }; module_platform_driver(spi_imx_driver); MODULE_DESCRIPTION("SPI Master Controller driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
RicoP/vlcfork
modules/gui/skins2/events/evt_mouse.cpp
57
1824
/***************************************************************************** * evt_mouse.cpp ***************************************************************************** * Copyright (C) 2003 the VideoLAN team * $Id$ * * Authors: Cyril Deguet <asmax@via.ecp.fr> * Olivier Teulière <ipkiss@via.ecp.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. *****************************************************************************/ #include "evt_mouse.hpp" const string EvtMouse::getAsString() const { string event = "mouse"; // Add the button if( m_button == kLeft ) event += ":left"; else if( m_button == kMiddle ) event += ":middle"; else if( m_button == kRight ) event += ":right"; else msg_Warn( getIntf(), "unknown button type" ); // Add the action if( m_action == kDown ) event += ":down"; else if( m_action == kUp ) event += ":up"; else if( m_action == kDblClick ) event += ":dblclick"; else msg_Warn( getIntf(), "unknown action type" ); // Add the modifier addModifier( event ); return event; }
gpl-2.0
brymaster5000/m7-501
drivers/media/video/videobuf-msm-mem.c
825
9573
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * Based on videobuf-dma-contig.c, * (c) 2008 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * helper functions for physically contiguous pmem capture buffers * The functions support contiguous memory allocations using pmem * kernel API. */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/io.h> #include <linux/android_pmem.h> #include <linux/memory_alloc.h> #include <media/videobuf-msm-mem.h> #include <media/msm_camera.h> #include <mach/memory.h> #define MAGIC_PMEM 0x0733ac64 #define MAGIC_CHECK(is, should) \ if (unlikely((is) != (should))) { \ pr_err("magic mismatch: %x expected %x\n", (is), (should)); \ BUG(); \ } #ifdef CONFIG_MSM_CAMERA_DEBUG #define D(fmt, args...) printk(KERN_DEBUG "videobuf-msm-mem: " fmt, ##args) #else #define D(fmt, args...) 
do {} while (0) #endif static int32_t msm_mem_allocate(const size_t size) { int32_t phyaddr; phyaddr = allocate_contiguous_ebi_nomap(size, SZ_4K); return phyaddr; } static int32_t msm_mem_free(const int32_t phyaddr) { int32_t rc = 0; free_contiguous_memory_by_paddr(phyaddr); return rc; } static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; D("vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); map->count++; } static void videobuf_vm_close(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; struct videobuf_queue *q = map->q; int i, rc; D("vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); map->count--; if (0 == map->count) { struct videobuf_contig_pmem *mem; D("munmap %p q=%p\n", map, q); mutex_lock(&q->vb_lock); /* We need first to cancel streams, before unmapping */ if (q->streaming) videobuf_queue_cancel(q); for (i = 0; i < VIDEO_MAX_FRAME; i++) { if (NULL == q->bufs[i]) continue; if (q->bufs[i]->map != map) continue; mem = q->bufs[i]->priv; if (mem) { /* This callback is called only if kernel has * allocated memory and this memory is mmapped. * In this case, memory should be freed, * in order to do memory unmap. 
*/ MAGIC_CHECK(mem->magic, MAGIC_PMEM); /* vfree is not atomic - can't be called with IRQ's disabled */ D("buf[%d] freeing physical %d\n", i, mem->phyaddr); rc = msm_mem_free(mem->phyaddr); if (rc < 0) D("%s: Invalid memory location\n", __func__); else { mem->phyaddr = 0; } } q->bufs[i]->map = NULL; q->bufs[i]->baddr = 0; } kfree(map); mutex_unlock(&q->vb_lock); /* deallocate the q->bufs[i] structure not a good solution as it will result in unnecessary iterations but right now this looks like the only cleaner way */ videobuf_mmap_free(q); } } static const struct vm_operations_struct videobuf_vm_ops = { .open = videobuf_vm_open, .close = videobuf_vm_close, }; /** * videobuf_pmem_contig_user_put() - reset pointer to user space buffer * @mem: per-buffer private videobuf-contig-pmem data * * This function resets the user space pointer */ static void videobuf_pmem_contig_user_put(struct videobuf_contig_pmem *mem) { if (mem->phyaddr) { put_pmem_file(mem->file); mem->is_userptr = 0; mem->phyaddr = 0; mem->size = 0; } } /** * videobuf_pmem_contig_user_get() - setup user space memory pointer * @mem: per-buffer private videobuf-contig-pmem data * @vb: video buffer to map * * This function validates and sets up a pointer to user space memory. * Only physically contiguous pfn-mapped memory is accepted. * * Returns 0 if successful. 
*/ static int videobuf_pmem_contig_user_get(struct videobuf_contig_pmem *mem, struct videobuf_buffer *vb) { unsigned long kvstart; unsigned long len; int rc; mem->size = PAGE_ALIGN(vb->size); rc = get_pmem_file(vb->baddr, (unsigned long *)&mem->phyaddr, &kvstart, &len, &mem->file); if (rc < 0) { pr_err("%s: get_pmem_file fd %lu error %d\n", __func__, vb->baddr, rc); return rc; } mem->phyaddr += vb->boff; mem->y_off = 0; mem->cbcr_off = (vb->size)*2/3; mem->is_userptr = 1; return rc; } static struct videobuf_buffer *__videobuf_alloc(size_t size) { struct videobuf_contig_pmem *mem; struct videobuf_buffer *vb; vb = kzalloc(size + sizeof(*mem), GFP_KERNEL); if (vb) { mem = vb->priv = ((char *)vb) + size; mem->magic = MAGIC_PMEM; } return vb; } static void *__videobuf_to_vaddr(struct videobuf_buffer *buf) { struct videobuf_contig_pmem *mem = buf->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_PMEM); return mem->vaddr; } static int __videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, struct v4l2_framebuffer *fbuf) { int rc = 0; struct videobuf_contig_pmem *mem = vb->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_PMEM); switch (vb->memory) { case V4L2_MEMORY_MMAP: D("%s memory method MMAP\n", __func__); /* All handling should be done by __videobuf_mmap_mapper() */ break; case V4L2_MEMORY_USERPTR: D("%s memory method USERPTR\n", __func__); /* handle pointer from user space */ rc = videobuf_pmem_contig_user_get(mem, vb); break; case V4L2_MEMORY_OVERLAY: default: pr_err("%s memory method OVERLAY/unknown\n", __func__); rc = -EINVAL; } return rc; } static int __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) { struct videobuf_contig_pmem *mem; struct videobuf_mapping *map; int retval; unsigned long size; D("%s\n", __func__); /* create mapping + update buffer list */ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); if (!map) { pr_err("%s: kzalloc failed.\n", __func__); return -ENOMEM; } 
buf->map = map; map->q = q; buf->baddr = vma->vm_start; mem = buf->priv; D("mem = 0x%x\n", (u32)mem); D("buf = 0x%x\n", (u32)buf); BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_PMEM); mem->size = PAGE_ALIGN(buf->bsize); mem->y_off = 0; mem->cbcr_off = (buf->bsize)*2/3; if (buf->i >= 0 && buf->i <= 3) mem->buffer_type = OUTPUT_TYPE_P; else mem->buffer_type = OUTPUT_TYPE_V; buf->bsize = mem->size; mem->phyaddr = msm_mem_allocate(mem->size); if (!mem->phyaddr) { pr_err("%s : pmem memory allocation failed\n", __func__); goto error; } /* Try to remap memory */ size = vma->vm_end - vma->vm_start; size = (size < mem->size) ? size : mem->size; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); retval = remap_pfn_range(vma, vma->vm_start, mem->phyaddr >> PAGE_SHIFT, size, vma->vm_page_prot); if (retval) { pr_err("mmap: remap failed with error %d. ", retval); retval = msm_mem_free(mem->phyaddr); if (retval < 0) printk(KERN_ERR "%s: Invalid memory location\n", __func__); else { mem->phyaddr = 0; } goto error; } vma->vm_ops = &videobuf_vm_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_private_data = map; D("mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n", map, q, vma->vm_start, vma->vm_end, (long int)buf->bsize, vma->vm_pgoff, buf->i); videobuf_vm_open(vma); return 0; error: kfree(map); return -ENOMEM; } static struct videobuf_qtype_ops qops = { .magic = MAGIC_QTYPE_OPS, .alloc_vb = __videobuf_alloc, .iolock = __videobuf_iolock, .mmap_mapper = __videobuf_mmap_mapper, .vaddr = __videobuf_to_vaddr, }; void videobuf_queue_pmem_contig_init(struct videobuf_queue *q, const struct videobuf_queue_ops *ops, struct device *dev, spinlock_t *irqlock, enum v4l2_buf_type type, enum v4l2_field field, unsigned int msize, void *priv, struct mutex *ext_lock) { videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize, priv, &qops, ext_lock); } EXPORT_SYMBOL_GPL(videobuf_queue_pmem_contig_init); int videobuf_to_pmem_contig(struct videobuf_buffer *buf) { struct 
videobuf_contig_pmem *mem = buf->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_PMEM); return mem->phyaddr; } EXPORT_SYMBOL_GPL(videobuf_to_pmem_contig); int videobuf_pmem_contig_free(struct videobuf_queue *q, struct videobuf_buffer *buf) { struct videobuf_contig_pmem *mem = buf->priv; /* mmapped memory can't be freed here, otherwise mmapped region would be released, while still needed. In this case, the memory release should happen inside videobuf_vm_close(). So, it should free memory only if the memory were allocated for read() operation. */ if (buf->memory != V4L2_MEMORY_USERPTR) return -EINVAL; if (!mem) return -ENOMEM; MAGIC_CHECK(mem->magic, MAGIC_PMEM); /* handle user space pointer case */ if (buf->baddr) { videobuf_pmem_contig_user_put(mem); return 0; } else { /* don't support read() method */ return -EINVAL; } } EXPORT_SYMBOL_GPL(videobuf_pmem_contig_free); MODULE_DESCRIPTION("helper module to manage video4linux PMEM contig buffers"); MODULE_LICENSE("GPL v2");
gpl-2.0
mtanski/linux-fs
drivers/hsi/controllers/omap_ssi_port.c
825
41834
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/of_gpio.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

/* No-op message callback used where the framework requires one. */
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

/* No-op client callback used where the framework requires one. */
static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

/* Sample the CAWAKE GPIO: non-zero when the remote drives wake high. */
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpio_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
/* Tear down this port's debugfs directory and everything under it. */
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

/*
 * Dump the port, SST (TX) and SSR (RX) register blocks to a seq_file.
 * Takes a runtime-PM reference for the duration of the register reads.
 */
static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open = ssi_port_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* debugfs getter for the SST clock divisor register. */
static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

/* debugfs setter for the SST clock divisor (hardware maximum is 127). */
static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;	/* keep the OFF-mode shadow in sync */
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

/* Create the per-port debugfs hierarchy: <port>/regs and <port>/sst/divisor. */
static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif

/*
 * Reserve a free GDD (DMA) logical channel for @msg.
 * Returns the channel index, or -EBUSY when all channels are in use.
 */
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

/*
 * Program GDD logical channel @lch for @msg and kick the transfer.
 * Maps the scatterlist for DMA, configures source/destination ports and
 * addressing modes, then enables the channel.  Holds a runtime-PM
 * reference for the duration of the transfer (released on completion).
 */
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	/* Hold clocks during the transfer */
	pm_runtime_get_sync(omap_port->pdev);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	/* non-relaxed write: also acts as the final posted-write barrier */
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

/*
 * Start a PIO (interrupt-driven, word-at-a-time) transfer for @msg by
 * arming the matching DATAACCEPT/DATAAVAILABLE interrupt.  Writes take
 * an extra runtime-PM reference that is dropped in ssi_pio_complete().
 */
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get_sync(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

/*
 * Start the first queued message on @queue.  Transfers longer than one
 * word try to claim a DMA channel; otherwise (or when none is free)
 * fall back to PIO.  No-op if the queue is empty or the head message is
 * already in flight.
 */
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

/*
 * Handle a break frame request: send a HW break (write) or arm break
 * detection and queue the message (read).  Only valid in FRAME mode.
 */
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}

/*
 * Queue @msg on the proper TX/RX channel queue and try to start it.
 * On start failure the message is unlinked and marked HSI_STATUS_ERROR.
 */
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

/*
 * Compute the SST divisor for the requested max_speed from the SSI
 * functional clock rate (TX clock is fck/2; the decrement rounds down
 * exact multiples).
 */
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

/*
 * Remove from @queue every message belonging to @cl (or all messages
 * when @cl is NULL) and dispose of each via its destructor, falling
 * back to hsi_free_msg().
 */
static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

/*
 * Apply the client's TX/RX configuration to the port: both directions
 * are put to SLEEP during the update, then TX (divisor, channels,
 * arbitration, mode) and RX (channels, timeout, mode) are programmed,
 * and shadow copies are kept for restoring after OFF mode.
 */
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}

/*
 * Abort everything in flight on the port: stop DMA channels, flush the
 * SST/SSR FIFOs, ack all errors and interrupts, and drop every queued
 * request (releasing the clock references that writes were holding).
 */
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_sync(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

/*
 * Raise the wake line (refcounted): only the first caller actually
 * grabs the clocks and asserts WAKE.
 */
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}

/*
 * Drop one wake-line reference; the last caller deasserts WAKE and
 * releases the clocks.  BUGs on refcount underflow.
 */
static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}

/*
 * Keep starting transfers on @queue, completing (with error) any head
 * message that fails to start.  The port lock is dropped around each
 * completion callback.
 */
static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
}

/*
 * Flush every queue entry belonging to @cl, release the clock
 * references held by its in-flight writes, and clean up buffer state
 * and pending interrupts for the affected channels.
 */
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks writes, also GDD ones */
			pm_runtime_put_sync(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

/*
 * Stop and unclaim every GDD channel carrying a transfer for @cl, then
 * disarm and ack the matching DMA interrupts.  Read transfers drop
 * their clock reference here; write references are handled in
 * ssi_cleanup_queues().
 */
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

/* Set both TX and RX to @mode; the final read acts as an OCP barrier. */
static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

/*
 * Client release: cancel the client's DMA and queued requests; when the
 * last client leaves, also drop any wake-in clock reference and put the
 * port to SLEEP.
 */
static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	spin_lock_bh(&omap_port->lock);
	pm_runtime_get_sync(omap_port->pdev);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	pm_runtime_put_sync(omap_port->pdev);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		pm_runtime_get_sync(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put_sync(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);

	return 0;
}

/*
 * Handle an SSR error interrupt: cancel all DMA and PIO reads, ack the
 * error, then fail every pending read request (restarting queued reads
 * afterwards).  The port lock is dropped around completion callbacks.
 */
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys +
SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

/*
 * Handle a HW break detection: disarm and ack the break interrupt, then
 * complete every message waiting on the break queue (lock dropped
 * around each completion callback).
 */
static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
		omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}

}

/*
 * Move one 32-bit word of the head message on @queue through the
 * TX/RX buffer register.  When the message finishes (or was empty)
 * disarm its interrupt, drop the write clock reference, complete it,
 * and start the next queued transfer.  For writes, completion is
 * deferred one interrupt so the last frame is really sent.
 */
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock(&omap_port->lock);

			return;
		}

	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_sync(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

/*
 * Bottom half for the port interrupt: service every enabled+pending
 * DATAACCEPT/DATAAVAILABLE/BREAK/ERROR condition, then either
 * reschedule itself (more work pending) or re-enable the IRQ.
 */
static void ssi_pio_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

	for (ch = 0; ch < omap_port->channels; ch++) {
		if (status_reg & SSI_DATAACCEPT(ch))
			ssi_pio_complete(port, &omap_port->txqueue[ch]);
		if (status_reg & SSI_DATAAVAILABLE(ch))
			ssi_pio_complete(port, &omap_port->rxqueue[ch]);
	}
	if (status_reg & SSI_BREAKDETECTED)
		ssi_break_complete(port);
	if (status_reg & SSI_ERROROCCURED)
		ssi_error(port);

	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);

	if (status_reg)
		tasklet_hi_schedule(&omap_port->pio_tasklet);
	else
		enable_irq(omap_port->irq);
}

/* Hard IRQ handler: mask the line and defer all work to the tasklet. */
static irqreturn_t ssi_pio_isr(int irq, void *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	tasklet_hi_schedule(&omap_port->pio_tasklet);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*
 * Bottom half for the CAWAKE GPIO: translate the current line level
 * into START_RX/STOP_RX events, keeping a single clock reference
 * (wkin_cken) while the remote holds wake high.
 */
static void ssi_wake_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/**
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation ocurrs.
		 */
		spin_lock(&omap_port->lock);
		if (!omap_port->wkin_cken) {
			omap_port->wkin_cken = 1;
			pm_runtime_get_sync(omap_port->pdev);
		}
		spin_unlock(&omap_port->lock);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		spin_lock(&omap_port->lock);
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		spin_unlock(&omap_port->lock);
	}
}

/* Hard IRQ handler for the wake GPIO: defer to the wake tasklet. */
static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);

	tasklet_hi_schedule(&omap_port->wake_tasklet);

	return IRQ_HANDLED;
}

/* Look up and request the port's MPU interrupt and set up its tasklet. */
static int __init ssi_port_irq(struct hsi_port *port,
						struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
						0, "mpu_irq0", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

/*
 * Request the CAWAKE GPIO interrupt (both edges) and enable it as a
 * wakeup source.  A port without a wake GPIO (-1) is valid and skipped.
 * NOTE(review): enable_irq_wake() is attempted even if devm_request_irq()
 * failed, and its error overwrites err — confirm intended.
 */
static int __init ssi_wake_irq(struct hsi_port *port,
						struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (omap_port->wake_gpio == -1) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpio_to_irq(omap_port->wake_gpio);

	omap_port->wake_irq = cawake_irq;
	tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
							"cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

/* Initialize the per-channel TX/RX/break message queues. */
static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
INIT_LIST_HEAD(&omap_port->rxqueue[ch]); } INIT_LIST_HEAD(&omap_port->brkqueue); } static int __init ssi_port_get_iomem(struct platform_device *pd, const char *name, void __iomem **pbase, dma_addr_t *phy) { struct hsi_port *port = platform_get_drvdata(pd); struct resource *mem; struct resource *ioarea; void __iomem *base; mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); if (!mem) { dev_err(&pd->dev, "IO memory region missing (%s)\n", name); return -ENXIO; } ioarea = devm_request_mem_region(&port->device, mem->start, resource_size(mem), dev_name(&pd->dev)); if (!ioarea) { dev_err(&pd->dev, "%s IO memory region request failed\n", mem->name); return -ENXIO; } base = devm_ioremap(&port->device, mem->start, resource_size(mem)); if (!base) { dev_err(&pd->dev, "%s IO remap failed\n", mem->name); return -ENXIO; } *pbase = base; if (phy) *phy = mem->start; return 0; } static int __init ssi_port_probe(struct platform_device *pd) { struct device_node *np = pd->dev.of_node; struct hsi_port *port; struct omap_ssi_port *omap_port; struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); u32 cawake_gpio = 0; u32 port_id; int err; dev_dbg(&pd->dev, "init ssi port...\n"); if (!try_module_get(ssi->owner)) { dev_err(&pd->dev, "could not increment parent module refcount\n"); return -ENODEV; } if (!ssi->port || !omap_ssi->port) { dev_err(&pd->dev, "ssi controller not initialized!\n"); err = -ENODEV; goto error; } /* get id of first uninitialized port in controller */ for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id]; port_id++) ; if (port_id >= ssi->num_ports) { dev_err(&pd->dev, "port id out of range!\n"); err = -ENODEV; goto error; } port = ssi->port[port_id]; if (!np) { dev_err(&pd->dev, "missing device tree data\n"); err = -EINVAL; goto error; } cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0); if (cawake_gpio < 0) { dev_err(&pd->dev, "DT data is missing cawake gpio 
(err=%d)\n", cawake_gpio); err = -ENODEV; goto error; } err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN, "cawake"); if (err) { dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n", err); err = -ENXIO; goto error; } omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); if (!omap_port) { err = -ENOMEM; goto error; } omap_port->wake_gpio = cawake_gpio; omap_port->pdev = &pd->dev; omap_port->port_id = port_id; /* initialize HSI port */ port->async = ssi_async; port->setup = ssi_setup; port->flush = ssi_flush; port->start_tx = ssi_start_tx; port->stop_tx = ssi_stop_tx; port->release = ssi_release; hsi_port_set_drvdata(port, omap_port); omap_ssi->port[port_id] = omap_port; platform_set_drvdata(pd, port); err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, &omap_port->sst_dma); if (err < 0) goto error; err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, &omap_port->ssr_dma); if (err < 0) goto error; err = ssi_port_irq(port, pd); if (err < 0) goto error; err = ssi_wake_irq(port, pd); if (err < 0) goto error; ssi_queues_init(omap_port); spin_lock_init(&omap_port->lock); spin_lock_init(&omap_port->wk_lock); omap_port->dev = &port->device; pm_runtime_irq_safe(omap_port->pdev); pm_runtime_enable(omap_port->pdev); #ifdef CONFIG_DEBUG_FS err = ssi_debug_add_port(omap_port, omap_ssi->dir); if (err < 0) { pm_runtime_disable(omap_port->pdev); goto error; } #endif hsi_add_clients_from_dt(port, np); dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n", port_id, cawake_gpio); return 0; error: return err; } static int __exit ssi_port_remove(struct platform_device *pd) { struct hsi_port *port = platform_get_drvdata(pd); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); #ifdef CONFIG_DEBUG_FS ssi_debug_remove_port(port); #endif hsi_port_unregister_clients(port); 
tasklet_kill(&omap_port->wake_tasklet); tasklet_kill(&omap_port->pio_tasklet); port->async = hsi_dummy_msg; port->setup = hsi_dummy_cl; port->flush = hsi_dummy_cl; port->start_tx = hsi_dummy_cl; port->stop_tx = hsi_dummy_cl; port->release = hsi_dummy_cl; omap_ssi->port[omap_port->port_id] = NULL; platform_set_drvdata(pd, NULL); module_put(ssi->owner); pm_runtime_disable(&pd->dev); return 0; } #ifdef CONFIG_PM static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) { struct hsi_port *port = to_hsi_port(omap_port->dev); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); omap_port->sys_mpu_enable = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); return 0; } static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port) { struct hsi_port *port = to_hsi_port(omap_port->dev); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); void __iomem *base; writel_relaxed(omap_port->sys_mpu_enable, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); /* SST context */ base = omap_port->sst_base; writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); /* SSR context */ base = omap_port->ssr_base; writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); return 0; } static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) { u32 mode; writel_relaxed(omap_port->sst.mode, omap_port->sst_base + SSI_SST_MODE_REG); writel_relaxed(omap_port->ssr.mode, omap_port->ssr_base + SSI_SSR_MODE_REG); /* OCP barrier */ mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); return 0; } static int 
ssi_restore_divisor(struct omap_ssi_port *omap_port) { writel_relaxed(omap_port->sst.divisor, omap_port->sst_base + SSI_SST_DIVISOR_REG); return 0; } static int omap_ssi_port_runtime_suspend(struct device *dev) { struct hsi_port *port = dev_get_drvdata(dev); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(dev, "port runtime suspend!\n"); ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); if (omap_ssi->get_loss) omap_port->loss_count = omap_ssi->get_loss(ssi->device.parent); ssi_save_port_ctx(omap_port); return 0; } static int omap_ssi_port_runtime_resume(struct device *dev) { struct hsi_port *port = dev_get_drvdata(dev); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(dev, "port runtime resume!\n"); if ((omap_ssi->get_loss) && (omap_port->loss_count == omap_ssi->get_loss(ssi->device.parent))) goto mode; /* We always need to restore the mode & TX divisor */ ssi_restore_port_ctx(omap_port); mode: ssi_restore_divisor(omap_port); ssi_restore_port_mode(omap_port); return 0; } static const struct dev_pm_ops omap_ssi_port_pm_ops = { SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend, omap_ssi_port_runtime_resume, NULL) }; #define DEV_PM_OPS (&omap_ssi_port_pm_ops) #else #define DEV_PM_OPS NULL #endif #ifdef CONFIG_OF static const struct of_device_id omap_ssi_port_of_match[] = { { .compatible = "ti,omap3-ssi-port", }, {}, }; MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match); #else #define omap_ssi_port_of_match NULL #endif static struct platform_driver ssi_port_pdriver = { .remove = __exit_p(ssi_port_remove), .driver = { .name = "omap_ssi_port", .of_match_table = omap_ssi_port_of_match, .pm = DEV_PM_OPS, }, }; module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe); 
/* Module metadata for the OMAP SSI port platform driver. */
MODULE_ALIAS("platform:omap_ssi_port");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
ruleless/linux
arch/sparc/prom/bootstr_64.c
1849
1178
/* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright(C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/string.h> #include <linux/init.h> #include <asm/oplib.h> /* WARNING: The boot loader knows that these next three variables come one right * after another in the .data section. Do not move this stuff into * the .bss section or it will break things. */ /* We limit BARG_LEN to 1024 because this is the size of the * 'barg_out' command line buffer in the SILO bootloader. */ #define BARG_LEN 1024 struct { int bootstr_len; int bootstr_valid; char bootstr_buf[BARG_LEN]; } bootstr_info = { .bootstr_len = BARG_LEN, #ifdef CONFIG_CMDLINE .bootstr_valid = 1, .bootstr_buf = CONFIG_CMDLINE, #endif }; char * __init prom_getbootargs(void) { /* This check saves us from a panic when bootfd patches args. */ if (bootstr_info.bootstr_valid) return bootstr_info.bootstr_buf; prom_getstring(prom_chosen_node, "bootargs", bootstr_info.bootstr_buf, BARG_LEN); bootstr_info.bootstr_valid = 1; return bootstr_info.bootstr_buf; }
gpl-2.0
W4TCH0UT/zz_quark
arch/arm/mach-omap1/fpga.c
2361
5340
/*
 * linux/arch/arm/mach-omap1/fpga.c
 *
 * Interrupt handler for OMAP-1510 Innovator FPGA
 *
 * Copyright (C) 2001 RidgeRun, Inc.
 * Author: Greg Lonnon <glonnon@ridgerun.com>
 *
 * Copyright (C) 2002 MontaVista Software, Inc.
 *
 * Separated FPGA interrupts from innovator1510.c and cleaned up for 2.6
 * Copyright (C) 2004 Nokia Corporation by Tony Lindrgen <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/mach/irq.h>

#include <mach/hardware.h>

#include "iomap.h"
#include "common.h"
#include "fpga.h"

/*
 * Mask (disable) one FPGA interrupt.  The FPGA interrupt sources are
 * spread over three 8-bit mask registers: IMR_LO (irqs 0-7), IMR_HI
 * (irqs 8-15) and IMR2 (irqs 16 and up).  Each mask bit is cleared with
 * a read-modify-write of the corresponding register.
 */
static void fpga_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;

	if (irq < 8)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO)
			      & ~(1 << irq)), OMAP1510_FPGA_IMR_LO);
	else if (irq < 16)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_HI)
			      & ~(1 << (irq - 8))), OMAP1510_FPGA_IMR_HI);
	else
		__raw_writeb((__raw_readb(INNOVATOR_FPGA_IMR2)
			      & ~(1 << (irq - 16))), INNOVATOR_FPGA_IMR2);
}

/*
 * Build a 24-bit word of the FPGA interrupts that are both pending (ISR)
 * and enabled (IMR): bits 0-7 from the LO pair, 8-15 from the HI pair,
 * 16-23 from the ISR2/IMR2 pair.
 */
static inline u32 get_fpga_unmasked_irqs(void)
{
	return ((__raw_readb(OMAP1510_FPGA_ISR_LO) &
		 __raw_readb(OMAP1510_FPGA_IMR_LO))) |
	       ((__raw_readb(OMAP1510_FPGA_ISR_HI) &
		 __raw_readb(OMAP1510_FPGA_IMR_HI)) << 8) |
	       ((__raw_readb(INNOVATOR_FPGA_ISR2) &
		 __raw_readb(INNOVATOR_FPGA_IMR2)) << 16);
}

static void fpga_ack_irq(struct irq_data *d)
{
	/* Don't need to explicitly ACK FPGA interrupts */
}

/*
 * Unmask (enable) one FPGA interrupt; mirror image of fpga_mask_irq().
 */
static void fpga_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;

	if (irq < 8)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO) | (1 << irq)),
		     OMAP1510_FPGA_IMR_LO);
	else if (irq < 16)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_HI)
			      | (1 << (irq - 8))), OMAP1510_FPGA_IMR_HI);
	else
		__raw_writeb((__raw_readb(INNOVATOR_FPGA_IMR2)
			      | (1 << (irq - 16))), INNOVATOR_FPGA_IMR2);
}

static void fpga_mask_ack_irq(struct irq_data *d)
{
	fpga_mask_irq(d);
	fpga_ack_irq(d);
}

/*
 * Chained handler for the FPGA cascade: walk the unmasked-and-pending
 * bitmap and dispatch each set bit to its virtual FPGA irq.  The bitmap
 * is shifted right as we go so the loop can stop early once it is empty.
 */
static void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc)
{
	u32 stat;
	int fpga_irq;

	stat = get_fpga_unmasked_irqs();

	if (!stat)
		return;

	for (fpga_irq = OMAP_FPGA_IRQ_BASE;
	     (fpga_irq < OMAP_FPGA_IRQ_END) && stat;
	     fpga_irq++, stat >>= 1) {
		if (stat & 1) {
			generic_handle_irq(fpga_irq);
		}
	}
}

/* irq_chip used for the level-sensitive touchscreen line (mask+ack). */
static struct irq_chip omap_fpga_irq_ack = {
	.name		= "FPGA-ack",
	.irq_ack	= fpga_mask_ack_irq,
	.irq_mask	= fpga_mask_irq,
	.irq_unmask	= fpga_unmask_irq,
};

/* irq_chip used for the edge-sensitive FPGA lines (no mask on ack). */
static struct irq_chip omap_fpga_irq = {
	.name		= "FPGA",
	.irq_ack	= fpga_ack_irq,
	.irq_mask	= fpga_mask_irq,
	.irq_unmask	= fpga_unmask_irq,
};

/*
 * All of the FPGA interrupt request inputs except for the touchscreen are
 * edge-sensitive; the touchscreen is level-sensitive.  The edge-sensitive
 * interrupts are acknowledged as a side-effect of reading the interrupt
 * status register from the FPGA.  The edge-sensitive interrupt inputs
 * cause a problem with level interrupt requests, such as Ethernet.  The
 * problem occurs when a level interrupt request is asserted while its
 * interrupt input is masked in the FPGA, which results in a missed
 * interrupt.
 *
 * In an attempt to workaround the problem with missed interrupts, the
 * mask_ack routine for all of the FPGA interrupts has been changed from
 * fpga_mask_ack_irq() to fpga_ack_irq() so that the specific FPGA interrupt
 * being serviced is left unmasked.  We can do this because the FPGA cascade
 * interrupt is installed with the IRQF_DISABLED flag, which leaves all
 * interrupts masked at the CPU while an FPGA interrupt handler executes.
 *
 * Limited testing indicates that this workaround appears to be effective
 * for the smc9194 Ethernet driver used on the Innovator.  It should work
 * on other FPGA interrupts as well, but any drivers that explicitly mask
 * interrupts at the interrupt controller via disable_irq/enable_irq
 * could pose a problem.
 */
void omap1510_fpga_init_irq(void)
{
	int i, res;

	/* Start with every FPGA interrupt source masked. */
	__raw_writeb(0, OMAP1510_FPGA_IMR_LO);
	__raw_writeb(0, OMAP1510_FPGA_IMR_HI);
	__raw_writeb(0, INNOVATOR_FPGA_IMR2);

	for (i = OMAP_FPGA_IRQ_BASE; i < OMAP_FPGA_IRQ_END; i++) {
		if (i == OMAP1510_INT_FPGA_TS) {
			/*
			 * The touchscreen interrupt is level-sensitive, so
			 * we'll use the regular mask_ack routine for it.
			 */
			irq_set_chip(i, &omap_fpga_irq_ack);
		} else {
			/*
			 * All FPGA interrupts except the touchscreen are
			 * edge-sensitive, so we won't mask them.
			 */
			irq_set_chip(i, &omap_fpga_irq);
		}

		irq_set_handler(i, handle_edge_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	/*
	 * The FPGA interrupt line is connected to GPIO13. Claim this pin for
	 * the ARM.
	 *
	 * NOTE: For general GPIO/MPUIO access and interrupts, please see
	 * gpio.[ch]
	 */
	res = gpio_request(13, "FPGA irq");
	if (res) {
		pr_err("%s failed to get gpio\n", __func__);
		return;
	}
	gpio_direction_input(13);
	irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux);
}
gpl-2.0
maikelwever/android_kernel_htc_msm8660-caf
drivers/w1/w1_io.c
2361
11569
/*
 * w1_io.c
 *
 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <asm/io.h>

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/module.h>

#include "w1.h"
#include "w1_log.h"

/* Multiplier applied to every bus timing delay (module param delay_coef). */
static int w1_delay_parm = 1;
module_param_named(delay_coef, w1_delay_parm, int, 0);

/* Lookup table for the Dallas/Maxim 1-Wire CRC8 (polynomial X^8+X^5+X^4+1). */
static u8 w1_crc8_table[] = {
	0, 94, 188, 226, 97, 63, 221, 131, 194, 156, 126, 32, 163, 253, 31, 65,
	157, 195, 33, 127, 252, 162, 64, 30, 95, 1, 227, 189, 62, 96, 130, 220,
	35, 125, 159, 193, 66, 28, 254, 160, 225, 191, 93, 3, 128, 222, 60, 98,
	190, 224, 2, 92, 223, 129, 99, 61, 124, 34, 192, 158, 29, 67, 161, 255,
	70, 24, 250, 164, 39, 121, 155, 197, 132, 218, 56, 102, 229, 187, 89, 7,
	219, 133, 103, 57, 186, 228, 6, 88, 25, 71, 165, 251, 120, 38, 196, 154,
	101, 59, 217, 135, 4, 90, 184, 230, 167, 249, 27, 69, 198, 152, 122, 36,
	248, 166, 68, 26, 153, 199, 37, 123, 58, 100, 134, 216, 91, 5, 231, 185,
	140, 210, 48, 110, 237, 179, 81, 15, 78, 16, 242, 172, 47, 113, 147, 205,
	17, 79, 173, 243, 112, 46, 204, 146, 211, 141, 111, 49, 178, 236, 14, 80,
	175, 241, 19, 77, 206, 144, 114, 44, 109, 51, 209, 143, 12, 82, 176, 238,
	50, 108, 142, 208, 83, 13, 239, 177, 240, 174, 76, 18, 145, 207, 45, 115,
	202, 148, 118, 40, 171, 245, 23, 73, 8, 86, 180, 234, 105, 55, 213, 139,
	87, 9, 235, 181, 54, 104, 138, 212, 149, 203, 41, 119, 244, 170, 72, 22,
	233, 183, 85, 11, 136, 214, 52, 106, 43, 117, 151, 201, 74, 20, 246, 168,
	116, 42, 200, 150, 21, 75, 169, 247, 182, 232, 10, 84, 215, 137, 107, 53
};

/* Busy-wait for tm microseconds, scaled by the delay_coef module param. */
static void w1_delay(unsigned long tm)
{
	udelay(tm * w1_delay_parm);
}

static void w1_write_bit(struct w1_master *dev, int bit);
static u8 w1_read_bit(struct w1_master *dev);

/**
 * Generates a write-0 or write-1 cycle and samples the level.
 * Uses the bus master's hardware touch_bit if available, otherwise
 * falls back to bit-banged read/write cycles.
 */
static u8 w1_touch_bit(struct w1_master *dev, int bit)
{
	if (dev->bus_master->touch_bit)
		return dev->bus_master->touch_bit(dev->bus_master->data, bit);
	else if (bit)
		return w1_read_bit(dev);
	else {
		w1_write_bit(dev, 0);
		return 0;
	}
}

/**
 * Generates a write-0 or write-1 cycle.
 * Only call if dev->bus_master->touch_bit is NULL
 *
 * The delay values (6/64 for a 1, 60/10 for a 0, in microseconds) follow
 * the standard-speed 1-Wire write-slot timing.
 */
static void w1_write_bit(struct w1_master *dev, int bit)
{
	if (bit) {
		dev->bus_master->write_bit(dev->bus_master->data, 0);
		w1_delay(6);
		dev->bus_master->write_bit(dev->bus_master->data, 1);
		w1_delay(64);
	} else {
		dev->bus_master->write_bit(dev->bus_master->data, 0);
		w1_delay(60);
		dev->bus_master->write_bit(dev->bus_master->data, 1);
		w1_delay(10);
	}
}

/**
 * Pre-write operation, currently only supporting strong pullups.
 * Program the hardware for a strong pullup, if one has been requested and
 * the hardware supports it.
 *
 * @param dev     the master device
 */
static void w1_pre_write(struct w1_master *dev)
{
	if (dev->pullup_duration &&
		dev->enable_pullup && dev->bus_master->set_pullup) {
		dev->bus_master->set_pullup(dev->bus_master->data,
			dev->pullup_duration);
	}
}

/**
 * Post-write operation, currently only supporting strong pullups.
 * If a strong pullup was requested, clear it if the hardware supports
 * them, or execute the delay otherwise, in either case clear the request.
 *
 * @param dev     the master device
 */
static void w1_post_write(struct w1_master *dev)
{
	if (dev->pullup_duration) {
		if (dev->enable_pullup && dev->bus_master->set_pullup)
			dev->bus_master->set_pullup(dev->bus_master->data, 0);
		else
			msleep(dev->pullup_duration);
		dev->pullup_duration = 0;
	}
}

/**
 * Writes 8 bits.
 *
 * @param dev     the master device
 * @param byte    the byte to write
 */
void w1_write_8(struct w1_master *dev, u8 byte)
{
	int i;

	if (dev->bus_master->write_byte) {
		w1_pre_write(dev);
		dev->bus_master->write_byte(dev->bus_master->data, byte);
	}
	else
		for (i = 0; i < 8; ++i) {
			/* Arm a pending strong pullup just before the last bit. */
			if (i == 7)
				w1_pre_write(dev);
			w1_touch_bit(dev, (byte >> i) & 0x1);
		}
	w1_post_write(dev);
}
EXPORT_SYMBOL_GPL(w1_write_8);


/**
 * Generates a write-1 cycle and samples the level.
 * Only call if dev->bus_master->touch_bit is NULL
 */
static u8 w1_read_bit(struct w1_master *dev)
{
	int result;

	dev->bus_master->write_bit(dev->bus_master->data, 0);
	w1_delay(6);
	dev->bus_master->write_bit(dev->bus_master->data, 1);
	/* Sample within the ~15us read-slot window after releasing the bus. */
	w1_delay(9);

	result = dev->bus_master->read_bit(dev->bus_master->data);
	w1_delay(55);

	return result & 0x1;
}

/**
 * Does a triplet - used for searching ROM addresses.
 * Return bits:
 *  bit 0 = id_bit
 *  bit 1 = comp_bit
 *  bit 2 = dir_taken
 * If both bits 0 & 1 are set, the search should be restarted.
 *
 * @param dev     the master device
 * @param bdir    the bit to write if both id_bit and comp_bit are 0
 * @return        bit fields - see above
 */
u8 w1_triplet(struct w1_master *dev, int bdir)
{
	if (dev->bus_master->triplet)
		return dev->bus_master->triplet(dev->bus_master->data, bdir);
	else {
		u8 id_bit   = w1_touch_bit(dev, 1);
		u8 comp_bit = w1_touch_bit(dev, 1);
		u8 retval;

		if (id_bit && comp_bit)
			return 0x03;  /* error */

		if (!id_bit && !comp_bit) {
			/* Both bits are valid, take the direction given */
			retval = bdir ? 0x04 : 0;
		} else {
			/* Only one bit is valid, take that direction */
			bdir = id_bit;
			retval = id_bit ? 0x05 : 0x02;
		}

		if (dev->bus_master->touch_bit)
			w1_touch_bit(dev, bdir);
		else
			w1_write_bit(dev, bdir);
		return retval;
	}
}

/**
 * Reads 8 bits.
 *
 * @param dev     the master device
 * @return        the byte read
 */
u8 w1_read_8(struct w1_master *dev)
{
	int i;
	u8 res = 0;

	if (dev->bus_master->read_byte)
		res = dev->bus_master->read_byte(dev->bus_master->data);
	else
		/* LSB first, matching the 1-Wire bit order. */
		for (i = 0; i < 8; ++i)
			res |= (w1_touch_bit(dev,1) << i);

	return res;
}
EXPORT_SYMBOL_GPL(w1_read_8);

/**
 * Writes a series of bytes.
 *
 * @param dev     the master device
 * @param buf     pointer to the data to write
 * @param len     the number of bytes to write
 */
void w1_write_block(struct w1_master *dev, const u8 *buf, int len)
{
	int i;

	if (dev->bus_master->write_block) {
		w1_pre_write(dev);
		dev->bus_master->write_block(dev->bus_master->data, buf, len);
	}
	else
		for (i = 0; i < len; ++i)
			w1_write_8(dev, buf[i]); /* calls w1_pre_write */
	w1_post_write(dev);
}
EXPORT_SYMBOL_GPL(w1_write_block);

/**
 * Touches a series of bytes.
 *
 * @param dev     the master device
 * @param buf     pointer to the data to write
 * @param len     the number of bytes to write
 */
void w1_touch_block(struct w1_master *dev, u8 *buf, int len)
{
	int i, j;
	u8 tmp;

	for (i = 0; i < len; ++i) {
		tmp = 0;
		for (j = 0; j < 8; ++j) {
			/* Arm a pending strong pullup before the final bit. */
			if (j == 7)
				w1_pre_write(dev);
			tmp |= w1_touch_bit(dev, (buf[i] >> j) & 0x1) << j;
		}

		buf[i] = tmp;
	}
}
EXPORT_SYMBOL_GPL(w1_touch_block);

/**
 * Reads a series of bytes.
 *
 * @param dev     the master device
 * @param buf     pointer to the buffer to fill
 * @param len     the number of bytes to read
 * @return        the number of bytes read
 */
u8 w1_read_block(struct w1_master *dev, u8 *buf, int len)
{
	int i;
	u8 ret;

	if (dev->bus_master->read_block)
		ret = dev->bus_master->read_block(dev->bus_master->data, buf, len);
	else {
		for (i = 0; i < len; ++i)
			buf[i] = w1_read_8(dev);
		ret = len;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(w1_read_block);

/**
 * Issues a reset bus sequence.
 *
 * @param  dev The bus master pointer
 * @return     0=Device present, 1=No device present or error
 */
int w1_reset_bus(struct w1_master *dev)
{
	int result;

	if (dev->bus_master->reset_bus)
		result = dev->bus_master->reset_bus(dev->bus_master->data) & 0x1;
	else {
		dev->bus_master->write_bit(dev->bus_master->data, 0);
		/* minimum 480, max ? us
		 * be nice and sleep, except 18b20 spec lists 960us maximum,
		 * so until we can sleep with microsecond accuracy, spin.
		 * Feel free to come up with some other way to give up the
		 * cpu for such a short amount of time AND get it back in
		 * the maximum amount of time.
		 */
		w1_delay(480);
		dev->bus_master->write_bit(dev->bus_master->data, 1);
		w1_delay(70);

		result = dev->bus_master->read_bit(dev->bus_master->data) & 0x1;
		/* minimum 70 (above) + 410 = 480 us
		 * There aren't any timing requirements between a reset and
		 * the following transactions.  Sleeping is safe here.
		 */
		/* w1_delay(410); min required time */
		msleep(1);
	}

	return result;
}
EXPORT_SYMBOL_GPL(w1_reset_bus);

/* Compute the 1-Wire CRC8 over len bytes using the lookup table above. */
u8 w1_calc_crc8(u8 * data, int len)
{
	u8 crc = 0;

	while (len--)
		crc = w1_crc8_table[crc ^ *data++];

	return crc;
}
EXPORT_SYMBOL_GPL(w1_calc_crc8);

/*
 * Run a device search, delegating to the bus master's hardware search
 * when one is provided, otherwise using the generic software w1_search().
 */
void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb)
{
	dev->attempts++;
	if (dev->bus_master->search)
		dev->bus_master->search(dev->bus_master->data, dev,
			search_type, cb);
	else
		w1_search(dev, search_type, cb);
}

/**
 * Resets the bus and then selects the slave by sending either a skip rom
 * or a rom match.
 * The w1 master lock must be held.
 *
 * @param sl      the slave to select
 * @return        0=success, anything else=error
 */
int w1_reset_select_slave(struct w1_slave *sl)
{
	if (w1_reset_bus(sl->master))
		return -1;

	if (sl->master->slave_count == 1)
		w1_write_8(sl->master, W1_SKIP_ROM);
	else {
		u8 match[9] = {W1_MATCH_ROM, };
		u64 rn = le64_to_cpu(*((u64*)&sl->reg_num));

		memcpy(&match[1], &rn, 8);
		w1_write_block(sl->master, match, 9);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(w1_reset_select_slave);

/**
 * When the workflow with a slave amongst many requires several
 * successive commands a reset between each, this function is similar
 * to doing a reset then a match ROM for the last matched ROM. The
 * advantage being that the matched ROM step is skipped in favor of the
 * resume command. The slave must support the command of course.
 *
 * If the bus has only one slave, traditionnaly the match ROM is skipped
 * and a "SKIP ROM" is done for efficiency. On multi-slave busses, this
 * doesn't work of course, but the resume command is the next best thing.
 *
 * The w1 master lock must be held.
 *
 * @param dev     the master device
 */
int w1_reset_resume_command(struct w1_master *dev)
{
	if (w1_reset_bus(dev))
		return -1;

	/* This will make only the last matched slave perform a skip ROM. */
	w1_write_8(dev, W1_RESUME_CMD);
	return 0;
}
EXPORT_SYMBOL_GPL(w1_reset_resume_command);

/**
 * Put out a strong pull-up of the specified duration after the next write
 * operation.  Not all hardware supports strong pullups.  Hardware that
 * doesn't support strong pullups will sleep for the given time after the
 * write operation without a strong pullup.  This is a one shot request for
 * the next write, specifying zero will clear a previous request.
 * The w1 master lock must be held.
 *
 * @param dev     the master device
 * @param delay   time in milliseconds
 */
void w1_next_pullup(struct w1_master *dev, int delay)
{
	dev->pullup_duration = delay;
}
EXPORT_SYMBOL_GPL(w1_next_pullup);
gpl-2.0
kannu1994/crespo_kernel
net/netfilter/nfnetlink_queue.c
2361
22603
/* * This is a module which is used for queueing packets and communicating with * userspace via nfnetlink. * * (C) 2005 by Harald Welte <laforge@netfilter.org> * (C) 2007 by Patrick McHardy <kaber@trash.net> * * Based on the old ipv4-only ip_queue.c: * (C) 2000-2002 James Morris <jmorris@intercode.com.au> * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/proc_fs.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_queue.h> #include <linux/list.h> #include <net/sock.h> #include <net/netfilter/nf_queue.h> #include <asm/atomic.h> #ifdef CONFIG_BRIDGE_NETFILTER #include "../bridge/br_private.h" #endif #define NFQNL_QMAX_DEFAULT 1024 struct nfqnl_instance { struct hlist_node hlist; /* global list of queues */ struct rcu_head rcu; int peer_pid; unsigned int queue_maxlen; unsigned int copy_range; unsigned int queue_dropped; unsigned int queue_user_dropped; u_int16_t queue_num; /* number of this queue */ u_int8_t copy_mode; /* * Following fields are dirtied for each queued packet, * keep them in same cache line if possible. 
*/ spinlock_t lock; unsigned int queue_total; atomic_t id_sequence; /* 'sequence' of pkt ids */ struct list_head queue_list; /* packets in queue */ }; typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); static DEFINE_SPINLOCK(instances_lock); #define INSTANCE_BUCKETS 16 static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly; static inline u_int8_t instance_hashfn(u_int16_t queue_num) { return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS; } static struct nfqnl_instance * instance_lookup(u_int16_t queue_num) { struct hlist_head *head; struct hlist_node *pos; struct nfqnl_instance *inst; head = &instance_table[instance_hashfn(queue_num)]; hlist_for_each_entry_rcu(inst, pos, head, hlist) { if (inst->queue_num == queue_num) return inst; } return NULL; } static struct nfqnl_instance * instance_create(u_int16_t queue_num, int pid) { struct nfqnl_instance *inst; unsigned int h; int err; spin_lock(&instances_lock); if (instance_lookup(queue_num)) { err = -EEXIST; goto out_unlock; } inst = kzalloc(sizeof(*inst), GFP_ATOMIC); if (!inst) { err = -ENOMEM; goto out_unlock; } inst->queue_num = queue_num; inst->peer_pid = pid; inst->queue_maxlen = NFQNL_QMAX_DEFAULT; inst->copy_range = 0xfffff; inst->copy_mode = NFQNL_COPY_NONE; spin_lock_init(&inst->lock); INIT_LIST_HEAD(&inst->queue_list); if (!try_module_get(THIS_MODULE)) { err = -EAGAIN; goto out_free; } h = instance_hashfn(queue_num); hlist_add_head_rcu(&inst->hlist, &instance_table[h]); spin_unlock(&instances_lock); return inst; out_free: kfree(inst); out_unlock: spin_unlock(&instances_lock); return ERR_PTR(err); } static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data); static void instance_destroy_rcu(struct rcu_head *head) { struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, rcu); nfqnl_flush(inst, NULL, 0); kfree(inst); module_put(THIS_MODULE); } static void __instance_destroy(struct nfqnl_instance *inst) { 
hlist_del_rcu(&inst->hlist); call_rcu(&inst->rcu, instance_destroy_rcu); } static void instance_destroy(struct nfqnl_instance *inst) { spin_lock(&instances_lock); __instance_destroy(inst); spin_unlock(&instances_lock); } static inline void __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) { list_add_tail(&entry->list, &queue->queue_list); queue->queue_total++; } static struct nf_queue_entry * find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) { struct nf_queue_entry *entry = NULL, *i; spin_lock_bh(&queue->lock); list_for_each_entry(i, &queue->queue_list, list) { if (i->id == id) { entry = i; break; } } if (entry) { list_del(&entry->list); queue->queue_total--; } spin_unlock_bh(&queue->lock); return entry; } static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) { struct nf_queue_entry *entry, *next; spin_lock_bh(&queue->lock); list_for_each_entry_safe(entry, next, &queue->queue_list, list) { if (!cmpfn || cmpfn(entry, data)) { list_del(&entry->list); queue->queue_total--; nf_reinject(entry, NF_DROP); } } spin_unlock_bh(&queue->lock); } static struct sk_buff * nfqnl_build_packet_message(struct nfqnl_instance *queue, struct nf_queue_entry *entry) { sk_buff_data_t old_tail; size_t size; size_t data_len = 0; struct sk_buff *skb; struct nfqnl_msg_packet_hdr pmsg; struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; struct sk_buff *entskb = entry->skb; struct net_device *indev; struct net_device *outdev; size = NLMSG_SPACE(sizeof(struct nfgenmsg)) + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #ifdef CONFIG_BRIDGE_NETFILTER + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #endif + nla_total_size(sizeof(u_int32_t)) /* mark */ + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); outdev = 
entry->outdev; switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { case NFQNL_COPY_META: case NFQNL_COPY_NONE: break; case NFQNL_COPY_PACKET: if (entskb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(entskb)) return NULL; data_len = ACCESS_ONCE(queue->copy_range); if (data_len == 0 || data_len > entskb->len) data_len = entskb->len; size += nla_total_size(data_len); break; } skb = alloc_skb(size, GFP_ATOMIC); if (!skb) goto nlmsg_failure; old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, sizeof(struct nfgenmsg)); nfmsg = NLMSG_DATA(nlh); nfmsg->nfgen_family = entry->pf; nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(queue->queue_num); entry->id = atomic_inc_return(&queue->id_sequence); pmsg.packet_id = htonl(entry->id); pmsg.hw_protocol = entskb->protocol; pmsg.hook = entry->hook; NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); indev = entry->indev; if (indev) { #ifndef CONFIG_BRIDGE_NETFILTER NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); #else if (entry->pf == PF_BRIDGE) { /* Case 1: indev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, htonl(indev->ifindex)); /* this is the bridge group "brX" */ /* rcu_read_lock()ed by __nf_queue */ NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(br_port_get_rcu(indev)->br->dev->ifindex)); } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); if (entskb->nf_bridge && entskb->nf_bridge->physindev) NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, htonl(entskb->nf_bridge->physindev->ifindex)); } #endif } if (outdev) { #ifndef CONFIG_BRIDGE_NETFILTER NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); #else if (entry->pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * 
netfilter_bridge) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, htonl(outdev->ifindex)); /* this is the bridge group "brX" */ /* rcu_read_lock()ed by __nf_queue */ NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); } else { /* Case 2: outdev is bridge group, we need to look for * physical output device (when called from ipv4) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, htonl(entskb->nf_bridge->physoutdev->ifindex)); } #endif } if (entskb->mark) NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); if (indev && entskb->dev && entskb->mac_header != entskb->network_header) { struct nfqnl_msg_packet_hw phw; int len = dev_parse_header(entskb, phw.hw_addr); if (len) { phw.hw_addrlen = htons(len); NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); } } if (entskb->tstamp.tv64) { struct nfqnl_msg_packet_timestamp ts; struct timeval tv = ktime_to_timeval(entskb->tstamp); ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = cpu_to_be64(tv.tv_usec); NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); } if (data_len) { struct nlattr *nla; int sz = nla_attr_size(data_len); if (skb_tailroom(skb) < nla_total_size(data_len)) { printk(KERN_WARNING "nf_queue: no tailroom!\n"); goto nlmsg_failure; } nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); nla->nla_type = NFQA_PAYLOAD; nla->nla_len = sz; if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) BUG(); } nlh->nlmsg_len = skb->tail - old_tail; return skb; nlmsg_failure: nla_put_failure: if (skb) kfree_skb(skb); if (net_ratelimit()) printk(KERN_ERR "nf_queue: error creating packet message\n"); return NULL; } static int nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) { struct sk_buff *nskb; struct nfqnl_instance *queue; int err = -ENOBUFS; /* rcu_read_lock()ed by nf_hook_slow() */ queue = instance_lookup(queuenum); if (!queue) { err = -ESRCH; goto err_out; } 
if (queue->copy_mode == NFQNL_COPY_NONE) { err = -EINVAL; goto err_out; } nskb = nfqnl_build_packet_message(queue, entry); if (nskb == NULL) { err = -ENOMEM; goto err_out; } spin_lock_bh(&queue->lock); if (!queue->peer_pid) { err = -EINVAL; goto err_out_free_nskb; } if (queue->queue_total >= queue->queue_maxlen) { queue->queue_dropped++; if (net_ratelimit()) printk(KERN_WARNING "nf_queue: full at %d entries, " "dropping packets(s).\n", queue->queue_total); goto err_out_free_nskb; } /* nfnetlink_unicast will either free the nskb or add it to a socket */ err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); if (err < 0) { queue->queue_user_dropped++; goto err_out_unlock; } __enqueue_entry(queue, entry); spin_unlock_bh(&queue->lock); return 0; err_out_free_nskb: kfree_skb(nskb); err_out_unlock: spin_unlock_bh(&queue->lock); err_out: return err; } static int nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) { struct sk_buff *nskb; int diff; diff = data_len - e->skb->len; if (diff < 0) { if (pskb_trim(e->skb, data_len)) return -ENOMEM; } else if (diff > 0) { if (data_len > 0xFFFF) return -EINVAL; if (diff > skb_tailroom(e->skb)) { nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), diff, GFP_ATOMIC); if (!nskb) { printk(KERN_WARNING "nf_queue: OOM " "in mangle, dropping packet\n"); return -ENOMEM; } kfree_skb(e->skb); e->skb = nskb; } skb_put(e->skb, diff); } if (!skb_make_writable(e->skb, data_len)) return -ENOMEM; skb_copy_to_linear_data(e->skb, data, data_len); e->skb->ip_summed = CHECKSUM_NONE; return 0; } static int nfqnl_set_mode(struct nfqnl_instance *queue, unsigned char mode, unsigned int range) { int status = 0; spin_lock_bh(&queue->lock); switch (mode) { case NFQNL_COPY_NONE: case NFQNL_COPY_META: queue->copy_mode = mode; queue->copy_range = 0; break; case NFQNL_COPY_PACKET: queue->copy_mode = mode; /* we're using struct nlattr which has 16bit nla_len */ if (range > 0xffff) queue->copy_range = 0xffff; else queue->copy_range 
= range; break; default: status = -EINVAL; } spin_unlock_bh(&queue->lock); return status; } static int dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) { if (entry->indev) if (entry->indev->ifindex == ifindex) return 1; if (entry->outdev) if (entry->outdev->ifindex == ifindex) return 1; #ifdef CONFIG_BRIDGE_NETFILTER if (entry->skb->nf_bridge) { if (entry->skb->nf_bridge->physindev && entry->skb->nf_bridge->physindev->ifindex == ifindex) return 1; if (entry->skb->nf_bridge->physoutdev && entry->skb->nf_bridge->physoutdev->ifindex == ifindex) return 1; } #endif return 0; } /* drop all packets with either indev or outdev == ifindex from all queue * instances */ static void nfqnl_dev_drop(int ifindex) { int i; rcu_read_lock(); for (i = 0; i < INSTANCE_BUCKETS; i++) { struct hlist_node *tmp; struct nfqnl_instance *inst; struct hlist_head *head = &instance_table[i]; hlist_for_each_entry_rcu(inst, tmp, head, hlist) nfqnl_flush(inst, dev_cmp, ifindex); } rcu_read_unlock(); } #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) static int nfqnl_rcv_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; /* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) nfqnl_dev_drop(dev->ifindex); return NOTIFY_DONE; } static struct notifier_block nfqnl_dev_notifier = { .notifier_call = nfqnl_rcv_dev_event, }; static int nfqnl_rcv_nl_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netlink_notify *n = ptr; if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { int i; /* destroy all instances for this pid */ spin_lock(&instances_lock); for (i = 0; i < INSTANCE_BUCKETS; i++) { struct hlist_node *tmp, *t2; struct nfqnl_instance *inst; struct hlist_head *head = &instance_table[i]; hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { if ((n->net == &init_net) && (n->pid == 
inst->peer_pid)) __instance_destroy(inst); } } spin_unlock(&instances_lock); } return NOTIFY_DONE; } static struct notifier_block nfqnl_rtnl_notifier = { .notifier_call = nfqnl_rcv_nl_event, }; static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, [NFQA_MARK] = { .type = NLA_U32 }, [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, }; static int nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); struct nfqnl_msg_verdict_hdr *vhdr; struct nfqnl_instance *queue; unsigned int verdict; struct nf_queue_entry *entry; int err; rcu_read_lock(); queue = instance_lookup(queue_num); if (!queue) { err = -ENODEV; goto err_out_unlock; } if (queue->peer_pid != NETLINK_CB(skb).pid) { err = -EPERM; goto err_out_unlock; } if (!nfqa[NFQA_VERDICT_HDR]) { err = -EINVAL; goto err_out_unlock; } vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); verdict = ntohl(vhdr->verdict); if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) { err = -EINVAL; goto err_out_unlock; } entry = find_dequeue_entry(queue, ntohl(vhdr->id)); if (entry == NULL) { err = -ENOENT; goto err_out_unlock; } rcu_read_unlock(); if (nfqa[NFQA_PAYLOAD]) { if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) verdict = NF_DROP; } if (nfqa[NFQA_MARK]) entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); nf_reinject(entry, verdict); return 0; err_out_unlock: rcu_read_unlock(); return err; } static int nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[]) { return -ENOTSUPP; } static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, }; static const struct 
nf_queue_handler nfqh = { .name = "nf_queue", .outfn = &nfqnl_enqueue_packet, }; static int nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); struct nfqnl_instance *queue; struct nfqnl_msg_config_cmd *cmd = NULL; int ret = 0; if (nfqa[NFQA_CFG_CMD]) { cmd = nla_data(nfqa[NFQA_CFG_CMD]); /* Commands without queue context - might sleep */ switch (cmd->command) { case NFQNL_CFG_CMD_PF_BIND: return nf_register_queue_handler(ntohs(cmd->pf), &nfqh); case NFQNL_CFG_CMD_PF_UNBIND: return nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh); } } rcu_read_lock(); queue = instance_lookup(queue_num); if (queue && queue->peer_pid != NETLINK_CB(skb).pid) { ret = -EPERM; goto err_out_unlock; } if (cmd != NULL) { switch (cmd->command) { case NFQNL_CFG_CMD_BIND: if (queue) { ret = -EBUSY; goto err_out_unlock; } queue = instance_create(queue_num, NETLINK_CB(skb).pid); if (IS_ERR(queue)) { ret = PTR_ERR(queue); goto err_out_unlock; } break; case NFQNL_CFG_CMD_UNBIND: if (!queue) { ret = -ENODEV; goto err_out_unlock; } instance_destroy(queue); break; case NFQNL_CFG_CMD_PF_BIND: case NFQNL_CFG_CMD_PF_UNBIND: break; default: ret = -ENOTSUPP; break; } } if (nfqa[NFQA_CFG_PARAMS]) { struct nfqnl_msg_config_params *params; if (!queue) { ret = -ENODEV; goto err_out_unlock; } params = nla_data(nfqa[NFQA_CFG_PARAMS]); nfqnl_set_mode(queue, params->copy_mode, ntohl(params->copy_range)); } if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { __be32 *queue_maxlen; if (!queue) { ret = -ENODEV; goto err_out_unlock; } queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); spin_lock_bh(&queue->lock); queue->queue_maxlen = ntohl(*queue_maxlen); spin_unlock_bh(&queue->lock); } err_out_unlock: rcu_read_unlock(); return ret; } static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp, .attr_count = NFQA_MAX, }, 
[NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict, .attr_count = NFQA_MAX, .policy = nfqa_verdict_policy }, [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, .attr_count = NFQA_CFG_MAX, .policy = nfqa_cfg_policy }, }; static const struct nfnetlink_subsystem nfqnl_subsys = { .name = "nf_queue", .subsys_id = NFNL_SUBSYS_QUEUE, .cb_count = NFQNL_MSG_MAX, .cb = nfqnl_cb, }; #ifdef CONFIG_PROC_FS struct iter_state { unsigned int bucket; }; static struct hlist_node *get_first(struct seq_file *seq) { struct iter_state *st = seq->private; if (!st) return NULL; for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { if (!hlist_empty(&instance_table[st->bucket])) return instance_table[st->bucket].first; } return NULL; } static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) { struct iter_state *st = seq->private; h = h->next; while (!h) { if (++st->bucket >= INSTANCE_BUCKETS) return NULL; h = instance_table[st->bucket].first; } return h; } static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) { struct hlist_node *head; head = get_first(seq); if (head) while (pos && (head = get_next(seq, head))) pos--; return pos ? 
NULL : head; } static void *seq_start(struct seq_file *seq, loff_t *pos) __acquires(instances_lock) { spin_lock(&instances_lock); return get_idx(seq, *pos); } static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { (*pos)++; return get_next(s, v); } static void seq_stop(struct seq_file *s, void *v) __releases(instances_lock) { spin_unlock(&instances_lock); } static int seq_show(struct seq_file *s, void *v) { const struct nfqnl_instance *inst = v; return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", inst->queue_num, inst->peer_pid, inst->queue_total, inst->copy_mode, inst->copy_range, inst->queue_dropped, inst->queue_user_dropped, atomic_read(&inst->id_sequence), 1); } static const struct seq_operations nfqnl_seq_ops = { .start = seq_start, .next = seq_next, .stop = seq_stop, .show = seq_show, }; static int nfqnl_open(struct inode *inode, struct file *file) { return seq_open_private(file, &nfqnl_seq_ops, sizeof(struct iter_state)); } static const struct file_operations nfqnl_file_ops = { .owner = THIS_MODULE, .open = nfqnl_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif /* PROC_FS */ static int __init nfnetlink_queue_init(void) { int i, status = -ENOMEM; for (i = 0; i < INSTANCE_BUCKETS; i++) INIT_HLIST_HEAD(&instance_table[i]); netlink_register_notifier(&nfqnl_rtnl_notifier); status = nfnetlink_subsys_register(&nfqnl_subsys); if (status < 0) { printk(KERN_ERR "nf_queue: failed to create netlink socket\n"); goto cleanup_netlink_notifier; } #ifdef CONFIG_PROC_FS if (!proc_create("nfnetlink_queue", 0440, proc_net_netfilter, &nfqnl_file_ops)) goto cleanup_subsys; #endif register_netdevice_notifier(&nfqnl_dev_notifier); return status; #ifdef CONFIG_PROC_FS cleanup_subsys: nfnetlink_subsys_unregister(&nfqnl_subsys); #endif cleanup_netlink_notifier: netlink_unregister_notifier(&nfqnl_rtnl_notifier); return status; } static void __exit nfnetlink_queue_fini(void) { nf_unregister_queue_handlers(&nfqh); 
unregister_netdevice_notifier(&nfqnl_dev_notifier); #ifdef CONFIG_PROC_FS remove_proc_entry("nfnetlink_queue", proc_net_netfilter); #endif nfnetlink_subsys_unregister(&nfqnl_subsys); netlink_unregister_notifier(&nfqnl_rtnl_notifier); rcu_barrier(); /* Wait for completion of call_rcu()'s */ } MODULE_DESCRIPTION("netfilter packet queue handler"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE); module_init(nfnetlink_queue_init); module_exit(nfnetlink_queue_fini);
gpl-2.0
GuojianZhou/linux-yocto-3.14
drivers/staging/vt6655/IEEE11h.c
2617
9234
/* * Copyright (c) 1996, 2005 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: IEEE11h.c * * Purpose: * * Functions: * * Revision History: * * Author: Yiching Chen * * Date: Mar. 31, 2005 * */ #include "ttype.h" #include "tmacro.h" #include "tether.h" #include "IEEE11h.h" #include "device.h" #include "wmgr.h" #include "rxtx.h" #include "channel.h" /*--------------------- Static Definitions -------------------------*/ static int msglevel = MSG_LEVEL_INFO; #pragma pack(1) typedef struct _WLAN_FRAME_ACTION { WLAN_80211HDR_A3 Header; unsigned char byCategory; unsigned char byAction; unsigned char abyVars[1]; } WLAN_FRAME_ACTION, *PWLAN_FRAME_ACTION; typedef struct _WLAN_FRAME_MSRREQ { WLAN_80211HDR_A3 Header; unsigned char byCategory; unsigned char byAction; unsigned char byDialogToken; WLAN_IE_MEASURE_REQ sMSRReqEIDs[1]; } WLAN_FRAME_MSRREQ, *PWLAN_FRAME_MSRREQ; typedef struct _WLAN_FRAME_MSRREP { WLAN_80211HDR_A3 Header; unsigned char byCategory; unsigned char byAction; unsigned char byDialogToken; WLAN_IE_MEASURE_REP sMSRRepEIDs[1]; } WLAN_FRAME_MSRREP, *PWLAN_FRAME_MSRREP; typedef struct _WLAN_FRAME_TPCREQ { WLAN_80211HDR_A3 Header; unsigned char byCategory; unsigned char byAction; unsigned char byDialogToken; WLAN_IE_TPC_REQ sTPCReqEIDs; } 
WLAN_FRAME_TPCREQ, *PWLAN_FRAME_TPCREQ; typedef struct _WLAN_FRAME_TPCREP { WLAN_80211HDR_A3 Header; unsigned char byCategory; unsigned char byAction; unsigned char byDialogToken; WLAN_IE_TPC_REP sTPCRepEIDs; } WLAN_FRAME_TPCREP, *PWLAN_FRAME_TPCREP; #pragma pack() /* action field reference ieee 802.11h Table 20e */ #define ACTION_MSRREQ 0 #define ACTION_MSRREP 1 #define ACTION_TPCREQ 2 #define ACTION_TPCREP 3 #define ACTION_CHSW 4 /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ static bool s_bRxMSRReq(PSMgmtObject pMgmt, PWLAN_FRAME_MSRREQ pMSRReq, unsigned int uLength) { size_t uNumOfEIDs = 0; bool bResult = true; if (uLength <= WLAN_A3FR_MAXLEN) memcpy(pMgmt->abyCurrentMSRReq, pMSRReq, uLength); uNumOfEIDs = ((uLength - offsetof(WLAN_FRAME_MSRREQ, sMSRReqEIDs))/ (sizeof(WLAN_IE_MEASURE_REQ))); pMgmt->pCurrMeasureEIDRep = &(((PWLAN_FRAME_MSRREP) (pMgmt->abyCurrentMSRRep))->sMSRRepEIDs[0]); pMgmt->uLengthOfRepEIDs = 0; bResult = CARDbStartMeasure(pMgmt->pAdapter, ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->sMSRReqEIDs, uNumOfEIDs ); return bResult; } static bool s_bRxTPCReq(PSMgmtObject pMgmt, PWLAN_FRAME_TPCREQ pTPCReq, unsigned char byRate, unsigned char byRSSI) { PWLAN_FRAME_TPCREP pFrame; PSTxMgmtPacket pTxPacket = NULL; pTxPacket = (PSTxMgmtPacket)pMgmt->pbyMgmtPacketPool; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket)); pFrame = (PWLAN_FRAME_TPCREP)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket)); pFrame->Header.wFrameCtl = (WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION) ); memcpy(pFrame->Header.abyAddr1, pTPCReq->Header.abyAddr2, WLAN_ADDR_LEN); memcpy(pFrame->Header.abyAddr2, CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN); 
memcpy(pFrame->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); pFrame->byCategory = 0; pFrame->byAction = 3; pFrame->byDialogToken = ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->byDialogToken; pFrame->sTPCRepEIDs.byElementID = WLAN_EID_TPC_REP; pFrame->sTPCRepEIDs.len = 2; pFrame->sTPCRepEIDs.byTxPower = CARDbyGetTransmitPower(pMgmt->pAdapter); switch (byRate) { case RATE_54M: pFrame->sTPCRepEIDs.byLinkMargin = 65 - byRSSI; break; case RATE_48M: pFrame->sTPCRepEIDs.byLinkMargin = 66 - byRSSI; break; case RATE_36M: pFrame->sTPCRepEIDs.byLinkMargin = 70 - byRSSI; break; case RATE_24M: pFrame->sTPCRepEIDs.byLinkMargin = 74 - byRSSI; break; case RATE_18M: pFrame->sTPCRepEIDs.byLinkMargin = 77 - byRSSI; break; case RATE_12M: pFrame->sTPCRepEIDs.byLinkMargin = 79 - byRSSI; break; case RATE_9M: pFrame->sTPCRepEIDs.byLinkMargin = 81 - byRSSI; break; case RATE_6M: default: pFrame->sTPCRepEIDs.byLinkMargin = 82 - byRSSI; break; } pTxPacket->cbMPDULen = sizeof(WLAN_FRAME_TPCREP); pTxPacket->cbPayloadLen = sizeof(WLAN_FRAME_TPCREP) - WLAN_HDR_ADDR3_LEN; if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING) return false; return true; /* return CARDbSendPacket(pMgmt->pAdapter, pFrame, PKT_TYPE_802_11_MNG, sizeof(WLAN_FRAME_TPCREP)); */ } /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /*+ * * Description: * Handles action management frames. * * Parameters: * In: * pMgmt - Management Object structure * pRxPacket - Received packet * Out: * none * * Return Value: None. 
* -*/ bool IEEE11hbMgrRxAction(void *pMgmtHandle, void *pRxPacket) { PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle; PWLAN_FRAME_ACTION pAction = NULL; unsigned int uLength = 0; PWLAN_IE_CH_SW pChannelSwitch = NULL; /* decode the frame */ uLength = ((PSRxMgmtPacket)pRxPacket)->cbMPDULen; if (uLength > WLAN_A3FR_MAXLEN) return false; pAction = (PWLAN_FRAME_ACTION) (((PSRxMgmtPacket)pRxPacket)->p80211Header); if (pAction->byCategory == 0) { switch (pAction->byAction) { case ACTION_MSRREQ: return s_bRxMSRReq(pMgmt, (PWLAN_FRAME_MSRREQ) pAction, uLength); break; case ACTION_MSRREP: break; case ACTION_TPCREQ: return s_bRxTPCReq(pMgmt, (PWLAN_FRAME_TPCREQ) pAction, ((PSRxMgmtPacket)pRxPacket)->byRxRate, (unsigned char) ((PSRxMgmtPacket)pRxPacket)->uRSSI); break; case ACTION_TPCREP: break; case ACTION_CHSW: pChannelSwitch = (PWLAN_IE_CH_SW) (pAction->abyVars); if ((pChannelSwitch->byElementID == WLAN_EID_CH_SWITCH) && (pChannelSwitch->len == 3)) { /* valid element id */ CARDbChannelSwitch(pMgmt->pAdapter, pChannelSwitch->byMode, get_channel_mapping(pMgmt->pAdapter, pChannelSwitch->byChannel, pMgmt->eCurrentPHYMode), pChannelSwitch->byCount); } break; default: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unknown Action = %d\n", pAction->byAction); break; } } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Unknown Category = %d\n", pAction->byCategory); pAction->byCategory |= 0x80; /*return CARDbSendPacket(pMgmt->pAdapter, pAction, PKT_TYPE_802_11_MNG, uLength);*/ return true; } return true; } bool IEEE11hbMSRRepTx(void *pMgmtHandle) { PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle; PWLAN_FRAME_MSRREP pMSRRep = (PWLAN_FRAME_MSRREP) (pMgmt->abyCurrentMSRRep + sizeof(STxMgmtPacket)); size_t uLength = 0; PSTxMgmtPacket pTxPacket = NULL; pTxPacket = (PSTxMgmtPacket)pMgmt->abyCurrentMSRRep; memset(pTxPacket, 0, sizeof(STxMgmtPacket) + WLAN_A3FR_MAXLEN); pTxPacket->p80211Header = (PUWLAN_80211HDR)((unsigned char *)pTxPacket + sizeof(STxMgmtPacket)); pMSRRep->Header.wFrameCtl = 
(WLAN_SET_FC_FTYPE(WLAN_FTYPE_MGMT) | WLAN_SET_FC_FSTYPE(WLAN_FSTYPE_ACTION) ); memcpy(pMSRRep->Header.abyAddr1, ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->Header.abyAddr2, WLAN_ADDR_LEN); memcpy(pMSRRep->Header.abyAddr2, CARDpGetCurrentAddress(pMgmt->pAdapter), WLAN_ADDR_LEN); memcpy(pMSRRep->Header.abyAddr3, pMgmt->abyCurrBSSID, WLAN_BSSID_LEN); pMSRRep->byCategory = 0; pMSRRep->byAction = 1; pMSRRep->byDialogToken = ((PWLAN_FRAME_MSRREQ) (pMgmt->abyCurrentMSRReq))->byDialogToken; uLength = pMgmt->uLengthOfRepEIDs + offsetof(WLAN_FRAME_MSRREP, sMSRRepEIDs); pTxPacket->cbMPDULen = uLength; pTxPacket->cbPayloadLen = uLength - WLAN_HDR_ADDR3_LEN; if (csMgmt_xmit(pMgmt->pAdapter, pTxPacket) != CMD_STATUS_PENDING) return false; return true; /* return CARDbSendPacket(pMgmt->pAdapter, pMSRRep, PKT_TYPE_802_11_MNG, uLength); */ }
gpl-2.0
ptmr3/Fpg_Kernel
drivers/usb/host/ohci-omap3.c
2617
6024
/* * ohci-omap3.c - driver for OHCI on OMAP3 and later processors * * Bus Glue for OMAP3 USBHOST 3 port OHCI controller * This controller is also used in later OMAPs and AM35x chips * * Copyright (C) 2007-2010 Texas Instruments, Inc. * Author: Vikram Pandita <vikram.pandita@ti.com> * Author: Anand Gadiyar <gadiyar@ti.com> * Author: Keshava Munegowda <keshava_mgowda@ti.com> * * Based on ehci-omap.c and some other ohci glue layers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * TODO (last updated Feb 27, 2011): * - add kernel-doc */ #include <linux/platform_device.h> #include <plat/usb.h> /*-------------------------------------------------------------------------*/ static int ohci_omap3_init(struct usb_hcd *hcd) { dev_dbg(hcd->self.controller, "starting OHCI controller\n"); return ohci_init(hcd_to_ohci(hcd)); } /*-------------------------------------------------------------------------*/ static int ohci_omap3_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); int ret; /* * RemoteWakeupConnected has to be set explicitly before * calling ohci_run. The reset value of RWC is 0. 
*/ ohci->hc_control = OHCI_CTRL_RWC; writel(OHCI_CTRL_RWC, &ohci->regs->control); ret = ohci_run(ohci); if (ret < 0) { dev_err(hcd->self.controller, "can't start\n"); ohci_stop(hcd); } return ret; } /*-------------------------------------------------------------------------*/ static const struct hc_driver ohci_omap3_hc_driver = { .description = hcd_name, .product_desc = "OMAP3 OHCI Host Controller", .hcd_priv_size = sizeof(struct ohci_hcd), /* * generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, /* * basic lifecycle operations */ .reset = ohci_omap3_init, .start = ohci_omap3_start, .stop = ohci_stop, .shutdown = ohci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* * scheduling support */ .get_frame_number = ohci_get_frame, /* * root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; /*-------------------------------------------------------------------------*/ /* * configure so an HC device and id are always provided * always called with process context; sleeping is OK */ /** * ohci_hcd_omap3_probe - initialize OMAP-based HCDs * * Allocates basic resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. 
*/ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usb_hcd *hcd = NULL; void __iomem *regs = NULL; struct resource *res; int ret = -ENODEV; int irq; if (usb_disabled()) goto err_end; if (!dev->parent) { dev_err(dev, "Missing parent device\n"); return -ENODEV; } irq = platform_get_irq_byname(pdev, "ohci-irq"); if (irq < 0) { dev_err(dev, "OHCI irq failed\n"); return -ENODEV; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ohci"); if (!ret) { dev_err(dev, "UHH OHCI get resource failed\n"); return -ENOMEM; } regs = ioremap(res->start, resource_size(res)); if (!regs) { dev_err(dev, "UHH OHCI ioremap failed\n"); return -ENOMEM; } hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, dev_name(dev)); if (!hcd) { dev_err(dev, "usb_create_hcd failed\n"); goto err_io; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = regs; ret = omap_usbhs_enable(dev); if (ret) { dev_dbg(dev, "failed to start ohci\n"); goto err_end; } ohci_hcd_init(hcd_to_ohci(hcd)); ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); if (ret) { dev_dbg(dev, "failed to add hcd with err %d\n", ret); goto err_add_hcd; } return 0; err_add_hcd: omap_usbhs_disable(dev); err_end: usb_put_hcd(hcd); err_io: iounmap(regs); return ret; } /* * may be called without controller electrically present * may be called with controller, bus, and devices active */ /** * ohci_hcd_omap3_remove - shutdown processing for OHCI HCDs * @pdev: USB Host Controller being removed * * Reverses the effect of ohci_hcd_omap3_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. 
*/ static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct usb_hcd *hcd = dev_get_drvdata(dev); iounmap(hcd->regs); usb_remove_hcd(hcd); omap_usbhs_disable(dev); usb_put_hcd(hcd); return 0; } static void ohci_hcd_omap3_shutdown(struct platform_device *pdev) { struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } static struct platform_driver ohci_hcd_omap3_driver = { .probe = ohci_hcd_omap3_probe, .remove = __devexit_p(ohci_hcd_omap3_remove), .shutdown = ohci_hcd_omap3_shutdown, .driver = { .name = "ohci-omap3", }, }; MODULE_ALIAS("platform:ohci-omap3"); MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
gpl-2.0
FAlinux-SoftwareinLife/silfa
OS/kernel/drivers/leds/leds-lp3944.c
4153
11398
/* * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip * * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ /* * I2C driver for National Semiconductor LP3944 Funlight Chip * http://www.national.com/pf/LP/LP3944.html * * This helper chip can drive up to 8 leds, with two programmable DIM modes; * it could even be used as a gpio expander but this driver assumes it is used * as a led controller. * * The DIM modes are used to set _blink_ patterns for leds, the pattern is * specified supplying two parameters: * - period: from 0s to 1.6s * - duty cycle: percentage of the period the led is on, from 0 to 100 * * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb * leds, the camera flash light and the displays backlights. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/leds-lp3944.h> /* Read Only Registers */ #define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */ #define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */ #define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */ #define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */ #define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */ #define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */ #define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */ #define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */ /* These registers are not used to control leds in LP3944, they can store * arbitrary values which the chip will ignore. 
*/
/* Scratch registers: the chip stores but otherwise ignores their contents. */
#define LP3944_REG_REGISTER8 0x08
#define LP3944_REG_REGISTER9 0x09

/* Indexes of the two programmable DIM (hardware blink) engines */
#define LP3944_DIM0 0
#define LP3944_DIM1 1

/* period in ms */
#define LP3944_PERIOD_MIN 0
#define LP3944_PERIOD_MAX 1600

/* duty cycle is a percentage */
#define LP3944_DUTY_CYCLE_MIN 0
#define LP3944_DUTY_CYCLE_MAX 100

/* Map a struct led_classdev back to its enclosing lp3944_led_data */
#define ldev_to_led(c)	container_of(c, struct lp3944_led_data, ldev)

/* Saved data */
/* Per-LED state, one instance per chip output (up to LP3944_LEDS_MAX). */
struct lp3944_led_data {
	u8 id;				/* LED index on the chip */
	enum lp3944_type type;		/* normal or inverted wiring */
	enum lp3944_status status;	/* last requested status/brightness */
	struct led_classdev ldev;	/* LED-class device for this output */
	struct i2c_client *client;	/* owning I2C client */
	struct work_struct work;	/* defers I2C writes out of atomic context */
};

/* Per-chip state. */
struct lp3944_data {
	struct mutex lock;		/* serializes read-modify-write of LS registers */
	struct i2c_client *client;
	struct lp3944_led_data leds[LP3944_LEDS_MAX];
};

/*
 * Read one chip register over SMBus into *value.
 * Returns 0 on success; any bus error is collapsed to -EINVAL (the SMBus
 * layer's original errno is not propagated).
 */
static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
{
	int tmp;

	tmp = i2c_smbus_read_byte_data(client, reg);
	if (tmp < 0)
		return -EINVAL;

	*value = tmp;

	return 0;
}

/* Write one chip register; returns the SMBus layer's status directly. */
static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(client, reg, value);
}

/**
 * Set the period for DIM status
 *
 * @client: the i2c client
 * @dim: either LP3944_DIM0 or LP3944_DIM1
 * @period: period of a blink, that is a on/off cycle, expressed in ms.
*/
static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
{
	u8 psc_reg;
	u8 psc_value;
	int err;

	if (dim == LP3944_DIM0)
		psc_reg = LP3944_REG_PSC0;
	else if (dim == LP3944_DIM1)
		psc_reg = LP3944_REG_PSC1;
	else
		return -EINVAL;

	/* Convert period to Prescaler value */
	if (period > LP3944_PERIOD_MAX)
		return -EINVAL;

	/* linear mapping: 0..1600 ms -> 0..255 prescaler counts */
	psc_value = (period * 255) / LP3944_PERIOD_MAX;

	err = lp3944_reg_write(client, psc_reg, psc_value);

	return err;
}

/**
 * Set the duty cycle for DIM status
 *
 * @client: the i2c client
 * @dim: either LP3944_DIM0 or LP3944_DIM1
 * @duty_cycle: percentage of a period during which a led is ON
 */
static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
				    u8 duty_cycle)
{
	u8 pwm_reg;
	u8 pwm_value;
	int err;

	if (dim == LP3944_DIM0)
		pwm_reg = LP3944_REG_PWM0;
	else if (dim == LP3944_DIM1)
		pwm_reg = LP3944_REG_PWM1;
	else
		return -EINVAL;

	/* Convert duty cycle to PWM value */
	if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
		return -EINVAL;

	/* linear mapping: 0..100 % -> 0..255 PWM counts */
	pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;

	err = lp3944_reg_write(client, pwm_reg, pwm_value);

	return err;
}

/**
 * Set the led status
 *
 * @led: a lp3944_led_data structure
 * @status: one of LP3944_LED_STATUS_OFF
 *          LP3944_LED_STATUS_ON
 *          LP3944_LED_STATUS_DIM0
 *          LP3944_LED_STATUS_DIM1
 */
static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
{
	struct lp3944_data *data = i2c_get_clientdata(led->client);
	u8 id = led->id;
	u8 reg;
	u8 val = 0;
	int err;

	dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
		__func__, led->ldev.name, status);

	/* LEDs 0-3 live in LS0, LEDs 4-7 in LS1; normalize id to 0-3
	 * within the selected register. */
	switch (id) {
	case LP3944_LED0:
	case LP3944_LED1:
	case LP3944_LED2:
	case LP3944_LED3:
		reg = LP3944_REG_LS0;
		break;
	case LP3944_LED4:
	case LP3944_LED5:
	case LP3944_LED6:
	case LP3944_LED7:
		id -= LP3944_LED4;
		reg = LP3944_REG_LS1;
		break;
	default:
		return -EINVAL;
	}

	if (status > LP3944_LED_STATUS_DIM1)
		return -EINVAL;

	/* invert only 0 and 1, leave unchanged the other values,
	 * remember we are abusing status to set blink patterns
	 */
	if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
		status = 1 - status;

	/* Read-modify-write of the shared selector register, guarded by
	 * the per-chip mutex: each LED owns a 2-bit field at (id << 1). */
	mutex_lock(&data->lock);
	/* NOTE(review): the return value of this read is ignored; on a bus
	 * error val stays 0 and the write below would clobber the other
	 * LEDs' bits in this register — confirm this is acceptable. */
	lp3944_reg_read(led->client, reg, &val);

	val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
	val |= (status << (id << 1));

	dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
		__func__, led->ldev.name, reg, id, status, val);

	/* set led status */
	err = lp3944_reg_write(led->client, reg, val);
	mutex_unlock(&data->lock);

	return err;
}

/*
 * Hardware-accelerated blink callback for the LED class.  Programs DIM0
 * with the requested period/duty cycle and switches the LED to DIM0.
 */
static int lp3944_led_set_blink(struct led_classdev *led_cdev,
				unsigned long *delay_on,
				unsigned long *delay_off)
{
	struct lp3944_led_data *led = ldev_to_led(led_cdev);
	u16 period;
	u8 duty_cycle;
	int err;

	/* units are in ms */
	if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
		return -EINVAL;

	if (*delay_on == 0 && *delay_off == 0) {
		/* Special case: the leds subsystem requires a default user
		 * friendly blink pattern for the LED. Let's blink the led
		 * slowly (1Hz).
		 */
		*delay_on = 500;
		*delay_off = 500;
	}

	period = (*delay_on) + (*delay_off);

	/* duty_cycle is the percentage of period during which the led is ON */
	duty_cycle = 100 * (*delay_on) / period;

	/* invert duty cycle for inverted leds, this has the same effect of
	 * swapping delay_on and delay_off
	 */
	if (led->type == LP3944_LED_TYPE_LED_INVERTED)
		duty_cycle = 100 - duty_cycle;

	/* NOTE: using always the first DIM mode, this means that all leds
	 * will have the same blinking pattern.
	 *
	 * We could find a way later to have two leds blinking in hardware
	 * with different patterns at the same time, falling back to software
	 * control for the other ones.
	 */
	err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
	if (err)
		return err;

	err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
	if (err)
		return err;

	dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
		__func__);

	/* The actual LS register write sleeps on I2C, so defer it. */
	led->status = LP3944_LED_STATUS_DIM0;
	schedule_work(&led->work);

	return 0;
}

/*
 * Brightness callback; may be called from atomic context, so only record
 * the request and defer the I2C access to the workqueue.
 */
static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
				      enum led_brightness brightness)
{
	struct lp3944_led_data *led = ldev_to_led(led_cdev);

	dev_dbg(&led->client->dev, "%s: %s, %d\n",
		__func__, led_cdev->name, brightness);

	led->status = brightness;
	schedule_work(&led->work);
}

/* Workqueue body: perform the deferred register update for one LED. */
static void lp3944_led_work(struct work_struct *work)
{
	struct lp3944_led_data *led;

	led = container_of(work, struct lp3944_led_data, work);
	lp3944_led_set(led, led->status);
}

/*
 * Register an LED-class device for every LED declared in the platform
 * data and program its initial status.  On failure, unwinds every LED
 * registered so far and returns the error.
 */
static int lp3944_configure(struct i2c_client *client,
			    struct lp3944_data *data,
			    struct lp3944_platform_data *pdata)
{
	int i, err = 0;

	for (i = 0; i < pdata->leds_size; i++) {
		struct lp3944_led *pled = &pdata->leds[i];
		struct lp3944_led_data *led = &data->leds[i];
		led->client = client;
		led->id = i;

		switch (pled->type) {

		case LP3944_LED_TYPE_LED:
		case LP3944_LED_TYPE_LED_INVERTED:
			led->type = pled->type;
			led->status = pled->status;
			led->ldev.name = pled->name;
			led->ldev.max_brightness = 1;
			led->ldev.brightness_set = lp3944_led_set_brightness;
			led->ldev.blink_set = lp3944_led_set_blink;
			led->ldev.flags = LED_CORE_SUSPENDRESUME;
			INIT_WORK(&led->work, lp3944_led_work);
			err = led_classdev_register(&client->dev, &led->ldev);
			if (err < 0) {
				dev_err(&client->dev,
					"couldn't register LED %s\n",
					led->ldev.name);
				goto exit;
			}

			/* to expose the default value to userspace */
			led->ldev.brightness = led->status;

			/* Set the default led status */
			err = lp3944_led_set(led, led->status);
			if (err < 0) {
				dev_err(&client->dev,
					"%s couldn't set STATUS %d\n",
					led->ldev.name, led->status);
				goto exit;
			}
			break;

		case LP3944_LED_TYPE_NONE:
		default:
			break;

		}
	}
	return 0;

exit:
	/* Unwind LEDs [0, i): unregister and flush their pending work. */
	if (i > 0)
		for (i = i - 1; i >= 0; i--)
			switch (pdata->leds[i].type) {

			case LP3944_LED_TYPE_LED:
			case LP3944_LED_TYPE_LED_INVERTED:
				led_classdev_unregister(&data->leds[i].ldev);
				cancel_work_sync(&data->leds[i].work);
				break;

			case LP3944_LED_TYPE_NONE:
			default:
				break;

			}

	return err;
}

/* I2C probe: validate platform data and adapter, allocate per-chip state. */
static int __devinit lp3944_probe(struct i2c_client *client,
				  const struct i2c_device_id *id)
{
	struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
	struct lp3944_data *data;
	int err;

	if (lp3944_pdata == NULL) {
		dev_err(&client->dev, "no platform data\n");
		return -EINVAL;
	}

	/* Let's see whether this adapter can support what we need. */
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev, "insufficient functionality!\n");
		return -ENODEV;
	}

	data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->client = client;
	i2c_set_clientdata(client, data);

	mutex_init(&data->lock);

	err = lp3944_configure(client, data, lp3944_pdata);
	if (err < 0) {
		kfree(data);
		return err;
	}

	dev_info(&client->dev, "lp3944 enabled\n");
	return 0;
}

/* I2C remove: tear down every registered LED and free per-chip state. */
static int __devexit lp3944_remove(struct i2c_client *client)
{
	struct lp3944_platform_data *pdata = client->dev.platform_data;
	struct lp3944_data *data = i2c_get_clientdata(client);
	int i;

	for (i = 0; i < pdata->leds_size; i++)
		switch (data->leds[i].type) {

		case LP3944_LED_TYPE_LED:
		case LP3944_LED_TYPE_LED_INVERTED:
			led_classdev_unregister(&data->leds[i].ldev);
			cancel_work_sync(&data->leds[i].work);
			break;

		case LP3944_LED_TYPE_NONE:
		default:
			break;

		}

	kfree(data);

	return 0;
}

/* lp3944 i2c driver struct */
static const struct i2c_device_id lp3944_id[] = {
	{"lp3944", 0},
	{}
};

MODULE_DEVICE_TABLE(i2c, lp3944_id);

static struct i2c_driver lp3944_driver = {
	.driver   = {
		   .name = "lp3944",
	},
	.probe    = lp3944_probe,
	.remove   = __devexit_p(lp3944_remove),
	.id_table = lp3944_id,
};

static int __init lp3944_module_init(void)
{
	return i2c_add_driver(&lp3944_driver);
}

static void __exit lp3944_module_exit(void)
{
	i2c_del_driver(&lp3944_driver);
}

module_init(lp3944_module_init);
module_exit(lp3944_module_exit);

MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("LP3944 Fun Light Chip");
MODULE_LICENSE("GPL");
gpl-2.0
Talustus/android_kernel_samsung_galaxys4
drivers/parport/parport_ip32.c
7481
68661
/* Low-level parallel port routines for built-in port on SGI IP32 * * Author: Arnaud Giersch <arnaud.giersch@free.fr> * * Based on parport_pc.c by * Phil Blundell, Tim Waugh, Jose Renau, David Campbell, * Andrea Arcangeli, et al. * * Thanks to Ilya A. Volynets-Evenbakh for his help. * * Copyright (C) 2005, 2006 Arnaud Giersch. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Current status: * * Basic SPP and PS2 modes are supported. * Support for parallel port IRQ is present. * Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are * supported. * SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with * or without interrupt support. * * Hardware ECP mode is not fully implemented (ecp_read_data and * ecp_write_addr are actually missing). * * To do: * * Fully implement ECP mode. * EPP and ECP mode need to be tested. I currently do not own any * peripheral supporting these extended mode, and cannot test them. * If DMA mode works well, decide if support for PIO FIFO modes should be * dropped. * Use the io{read,write} family functions when they become available in * the linux-mips.org tree. Note: the MIPS specific functions readsb() * and writesb() are to be translated by ioread8_rep() and iowrite8_rep() * respectively. */ /* The built-in parallel port on the SGI 02 workstation (a.k.a. 
IP32) is an * IEEE 1284 parallel port driven by a Texas Instrument TL16PIR552PH chip[1]. * This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte * FIFO buffer and supports DMA transfers. * * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html * * Theoretically, we could simply use the parport_pc module. It is however * not so simple. The parport_pc code assumes that the parallel port * registers are port-mapped. On the O2, they are memory-mapped. * Furthermore, each register is replicated on 256 consecutive addresses (as * it is for the built-in serial ports on the same chip). */ /*--- Some configuration defines ---------------------------------------*/ /* DEBUG_PARPORT_IP32 * 0 disable debug * 1 standard level: pr_debug1 is enabled * 2 parport_ip32_dump_state is enabled * >=3 verbose level: pr_debug is enabled */ #if !defined(DEBUG_PARPORT_IP32) # define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */ #endif /*----------------------------------------------------------------------*/ /* Setup DEBUG macros. This is done before any includes, just in case we * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3. 
*/ #if DEBUG_PARPORT_IP32 == 1 # warning DEBUG_PARPORT_IP32 == 1 #elif DEBUG_PARPORT_IP32 == 2 # warning DEBUG_PARPORT_IP32 == 2 #elif DEBUG_PARPORT_IP32 >= 3 # warning DEBUG_PARPORT_IP32 >= 3 # if !defined(DEBUG) # define DEBUG /* enable pr_debug() in kernel.h */ # endif #endif #include <linux/completion.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/parport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/types.h> #include <asm/io.h> #include <asm/ip32/ip32_ints.h> #include <asm/ip32/mace.h> /*--- Global variables -------------------------------------------------*/ /* Verbose probing on by default for debugging. */ #if DEBUG_PARPORT_IP32 >= 1 # define DEFAULT_VERBOSE_PROBING 1 #else # define DEFAULT_VERBOSE_PROBING 0 #endif /* Default prefix for printk */ #define PPIP32 "parport_ip32: " /* * These are the module parameters: * @features: bit mask of features to enable/disable * (all enabled by default) * @verbose_probing: log chit-chat during initialization */ #define PARPORT_IP32_ENABLE_IRQ (1U << 0) #define PARPORT_IP32_ENABLE_DMA (1U << 1) #define PARPORT_IP32_ENABLE_SPP (1U << 2) #define PARPORT_IP32_ENABLE_EPP (1U << 3) #define PARPORT_IP32_ENABLE_ECP (1U << 4) static unsigned int features = ~0U; static bool verbose_probing = DEFAULT_VERBOSE_PROBING; /* We do not support more than one port. */ static struct parport *this_port = NULL; /* Timing constants for FIFO modes. 
*/ #define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */ #define FIFO_POLLING_INTERVAL 50 /* microseconds */ /*--- I/O register definitions -----------------------------------------*/ /** * struct parport_ip32_regs - virtual addresses of parallel port registers * @data: Data Register * @dsr: Device Status Register * @dcr: Device Control Register * @eppAddr: EPP Address Register * @eppData0: EPP Data Register 0 * @eppData1: EPP Data Register 1 * @eppData2: EPP Data Register 2 * @eppData3: EPP Data Register 3 * @ecpAFifo: ECP Address FIFO * @fifo: General FIFO register. The same address is used for: * - cFifo, the Parallel Port DATA FIFO * - ecpDFifo, the ECP Data FIFO * - tFifo, the ECP Test FIFO * @cnfgA: Configuration Register A * @cnfgB: Configuration Register B * @ecr: Extended Control Register */ struct parport_ip32_regs { void __iomem *data; void __iomem *dsr; void __iomem *dcr; void __iomem *eppAddr; void __iomem *eppData0; void __iomem *eppData1; void __iomem *eppData2; void __iomem *eppData3; void __iomem *ecpAFifo; void __iomem *fifo; void __iomem *cnfgA; void __iomem *cnfgB; void __iomem *ecr; }; /* Device Status Register */ #define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */ #define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */ #define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */ #define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */ #define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */ #define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */ /* #define DSR_reserved (1U << 1) */ #define DSR_TIMEOUT (1U << 0) /* EPP timeout */ /* Device Control Register */ /* #define DCR_reserved (1U << 7) | (1U << 6) */ #define DCR_DIR (1U << 5) /* direction */ #define DCR_IRQ (1U << 4) /* interrupt on nAck */ #define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */ #define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */ #define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */ #define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */ /* ECP Configuration 
Register A */ #define CNFGA_IRQ (1U << 7) #define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4)) #define CNFGA_ID_SHIFT 4 #define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT) #define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT) #define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT) /* #define CNFGA_reserved (1U << 3) */ #define CNFGA_nBYTEINTRANS (1U << 2) #define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0)) /* ECP Configuration Register B */ #define CNFGB_COMPRESS (1U << 7) #define CNFGB_INTRVAL (1U << 6) #define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3)) #define CNFGB_IRQ_SHIFT 3 #define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0)) #define CNFGB_DMA_SHIFT 0 /* Extended Control Register */ #define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5)) #define ECR_MODE_SHIFT 5 #define ECR_MODE_SPP (00U << ECR_MODE_SHIFT) #define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT) #define ECR_MODE_PPF (02U << ECR_MODE_SHIFT) #define ECR_MODE_ECP (03U << ECR_MODE_SHIFT) #define ECR_MODE_EPP (04U << ECR_MODE_SHIFT) /* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */ #define ECR_MODE_TST (06U << ECR_MODE_SHIFT) #define ECR_MODE_CFG (07U << ECR_MODE_SHIFT) #define ECR_nERRINTR (1U << 4) #define ECR_DMAEN (1U << 3) #define ECR_SERVINTR (1U << 2) #define ECR_F_FULL (1U << 1) #define ECR_F_EMPTY (1U << 0) /*--- Private data -----------------------------------------------------*/ /** * enum parport_ip32_irq_mode - operation mode of interrupt handler * @PARPORT_IP32_IRQ_FWD: forward interrupt to the upper parport layer * @PARPORT_IP32_IRQ_HERE: interrupt is handled locally */ enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE }; /** * struct parport_ip32_private - private stuff for &struct parport * @regs: register addresses * @dcr_cache: cached contents of DCR * @dcr_writable: bit mask of writable DCR bits * @pword: number of bytes per PWord * @fifo_depth: number of PWords that FIFO will hold * @readIntrThreshold: minimum number of PWords we can read * if we get an interrupt * 
@writeIntrThreshold: minimum number of PWords we can write * if we get an interrupt * @irq_mode: operation mode of interrupt handler for this port * @irq_complete: mutex used to wait for an interrupt to occur */ struct parport_ip32_private { struct parport_ip32_regs regs; unsigned int dcr_cache; unsigned int dcr_writable; unsigned int pword; unsigned int fifo_depth; unsigned int readIntrThreshold; unsigned int writeIntrThreshold; enum parport_ip32_irq_mode irq_mode; struct completion irq_complete; }; /*--- Debug code -------------------------------------------------------*/ /* * pr_debug1 - print debug messages * * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1 */ #if DEBUG_PARPORT_IP32 >= 1 # define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__) #else /* DEBUG_PARPORT_IP32 < 1 */ # define pr_debug1(...) do { } while (0) #endif /* * pr_trace, pr_trace1 - trace function calls * @p: pointer to &struct parport * @fmt: printk format string * @...: parameters for format string * * Macros used to trace function calls. The given string is formatted after * function name. pr_trace() uses pr_debug(), and pr_trace1() uses * pr_debug1(). __pr_trace() is the low-level macro and is not to be used * directly. */ #define __pr_trace(pr, p, fmt, ...) \ pr("%s: %s" fmt "\n", \ ({ const struct parport *__p = (p); \ __p ? __p->name : "parport_ip32"; }), \ __func__ , ##__VA_ARGS__) #define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__) #define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__) /* * __pr_probe, pr_probe - print message if @verbose_probing is true * @p: pointer to &struct parport * @fmt: printk format string * @...: parameters for format string * * For new lines, use pr_probe(). Use __pr_probe() for continued lines. */ #define __pr_probe(...) \ do { if (verbose_probing) printk(__VA_ARGS__); } while (0) #define pr_probe(p, fmt, ...) 
\ __pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__) /* * parport_ip32_dump_state - print register status of parport * @p: pointer to &struct parport * @str: string to add in message * @show_ecp_config: shall we dump ECP configuration registers too? * * This function is only here for debugging purpose, and should be used with * care. Reading the parallel port registers may have undesired side effects. * Especially if @show_ecp_config is true, the parallel port is resetted. * This function is only defined if %DEBUG_PARPORT_IP32 >= 2. */ #if DEBUG_PARPORT_IP32 >= 2 static void parport_ip32_dump_state(struct parport *p, char *str, unsigned int show_ecp_config) { struct parport_ip32_private * const priv = p->physport->private_data; unsigned int i; printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str); { static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF", "ECP", "EPP", "???", "TST", "CFG"}; unsigned int ecr = readb(priv->regs.ecr); printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr); printk(" %s", ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]); if (ecr & ECR_nERRINTR) printk(",nErrIntrEn"); if (ecr & ECR_DMAEN) printk(",dmaEn"); if (ecr & ECR_SERVINTR) printk(",serviceIntr"); if (ecr & ECR_F_FULL) printk(",f_full"); if (ecr & ECR_F_EMPTY) printk(",f_empty"); printk("\n"); } if (show_ecp_config) { unsigned int oecr, cnfgA, cnfgB; oecr = readb(priv->regs.ecr); writeb(ECR_MODE_PS2, priv->regs.ecr); writeb(ECR_MODE_CFG, priv->regs.ecr); cnfgA = readb(priv->regs.cnfgA); cnfgB = readb(priv->regs.cnfgB); writeb(ECR_MODE_PS2, priv->regs.ecr); writeb(oecr, priv->regs.ecr); printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA); printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? 
"Level" : "Pulses"); switch (cnfgA & CNFGA_ID_MASK) { case CNFGA_ID_8: printk(",8 bits"); break; case CNFGA_ID_16: printk(",16 bits"); break; case CNFGA_ID_32: printk(",32 bits"); break; default: printk(",unknown ID"); break; } if (!(cnfgA & CNFGA_nBYTEINTRANS)) printk(",ByteInTrans"); if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8) printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT, ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : ""); printk("\n"); printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB); printk(" irq=%u,dma=%u", (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT, (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT); printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL)); if (cnfgB & CNFGB_COMPRESS) printk(",compress"); printk("\n"); } for (i = 0; i < 2; i++) { unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr); printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x", i ? "soft" : "hard", dcr); printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd"); if (dcr & DCR_IRQ) printk(",ackIntEn"); if (!(dcr & DCR_SELECT)) printk(",nSelectIn"); if (dcr & DCR_nINIT) printk(",nInit"); if (!(dcr & DCR_AUTOFD)) printk(",nAutoFD"); if (!(dcr & DCR_STROBE)) printk(",nStrobe"); printk("\n"); } #define sep (f++ ? ',' : ' ') { unsigned int f = 0; unsigned int dsr = readb(priv->regs.dsr); printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr); if (!(dsr & DSR_nBUSY)) printk("%cBusy", sep); if (dsr & DSR_nACK) printk("%cnAck", sep); if (dsr & DSR_PERROR) printk("%cPError", sep); if (dsr & DSR_SELECT) printk("%cSelect", sep); if (dsr & DSR_nFAULT) printk("%cnFault", sep); if (!(dsr & DSR_nPRINT)) printk("%c(Print)", sep); if (dsr & DSR_TIMEOUT) printk("%cTimeout", sep); printk("\n"); } #undef sep } #else /* DEBUG_PARPORT_IP32 < 2 */ #define parport_ip32_dump_state(...) 
do { } while (0) #endif /* * CHECK_EXTRA_BITS - track and log extra bits * @p: pointer to &struct parport * @b: byte to inspect * @m: bit mask of authorized bits * * This is used to track and log extra bits that should not be there in * parport_ip32_write_control() and parport_ip32_frob_control(). It is only * defined if %DEBUG_PARPORT_IP32 >= 1. */ #if DEBUG_PARPORT_IP32 >= 1 #define CHECK_EXTRA_BITS(p, b, m) \ do { \ unsigned int __b = (b), __m = (m); \ if (__b & ~__m) \ pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \ "0x%02x/0x%02x\n", \ (p)->name, __func__, #b, __b, __m); \ } while (0) #else /* DEBUG_PARPORT_IP32 < 1 */ #define CHECK_EXTRA_BITS(...) do { } while (0) #endif /*--- IP32 parallel port DMA operations --------------------------------*/ /** * struct parport_ip32_dma_data - private data needed for DMA operation * @dir: DMA direction (from or to device) * @buf: buffer physical address * @len: buffer length * @next: address of next bytes to DMA transfer * @left: number of bytes remaining * @ctx: next context to write (0: context_a; 1: context_b) * @irq_on: are the DMA IRQs currently enabled? * @lock: spinlock to protect access to the structure */ struct parport_ip32_dma_data { enum dma_data_direction dir; dma_addr_t buf; dma_addr_t next; size_t len; size_t left; unsigned int ctx; unsigned int irq_on; spinlock_t lock; }; static struct parport_ip32_dma_data parport_ip32_dma; /** * parport_ip32_dma_setup_context - setup next DMA context * @limit: maximum data size for the context * * The alignment constraints must be verified in caller function, and the * parameter @limit must be set accordingly. */ static void parport_ip32_dma_setup_context(unsigned int limit) { unsigned long flags; spin_lock_irqsave(&parport_ip32_dma.lock, flags); if (parport_ip32_dma.left > 0) { /* Note: ctxreg is "volatile" here only because * mace->perif.ctrl.parport.context_a and context_b are * "volatile". */ volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ? 
&mace->perif.ctrl.parport.context_a : &mace->perif.ctrl.parport.context_b; u64 count; u64 ctxval; if (parport_ip32_dma.left <= limit) { count = parport_ip32_dma.left; ctxval = MACEPAR_CONTEXT_LASTFLAG; } else { count = limit; ctxval = 0; } pr_trace(NULL, "(%u): 0x%04x:0x%04x, %u -> %u%s", limit, (unsigned int)parport_ip32_dma.buf, (unsigned int)parport_ip32_dma.next, (unsigned int)count, parport_ip32_dma.ctx, ctxval ? "*" : ""); ctxval |= parport_ip32_dma.next & MACEPAR_CONTEXT_BASEADDR_MASK; ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) & MACEPAR_CONTEXT_DATALEN_MASK; writeq(ctxval, ctxreg); parport_ip32_dma.next += count; parport_ip32_dma.left -= count; parport_ip32_dma.ctx ^= 1U; } /* If there is nothing more to send, disable IRQs to avoid to * face an IRQ storm which can lock the machine. Disable them * only once. */ if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) { pr_debug(PPIP32 "IRQ off (ctx)\n"); disable_irq_nosync(MACEISA_PAR_CTXA_IRQ); disable_irq_nosync(MACEISA_PAR_CTXB_IRQ); parport_ip32_dma.irq_on = 0; } spin_unlock_irqrestore(&parport_ip32_dma.lock, flags); } /** * parport_ip32_dma_interrupt - DMA interrupt handler * @irq: interrupt number * @dev_id: unused */ static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id) { if (parport_ip32_dma.left) pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx); parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND); return IRQ_HANDLED; } #if DEBUG_PARPORT_IP32 static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id) { pr_trace1(NULL, "(%d)", irq); return IRQ_HANDLED; } #endif /** * parport_ip32_dma_start - begins a DMA transfer * @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE * @addr: pointer to data buffer * @count: buffer size * * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be * correctly balanced. 
*/
static int parport_ip32_dma_start(enum dma_data_direction dir,
				  void *addr, size_t count)
{
	unsigned int limit;
	u64 ctrl;

	pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);

	/* FIXME - add support for DMA_FROM_DEVICE.  In this case, buffer must
	 * be 64 bytes aligned. */
	BUG_ON(dir != DMA_TO_DEVICE);

	/* Reset DMA controller */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* DMA IRQs should normally be enabled */
	if (!parport_ip32_dma.irq_on) {
		WARN_ON(1);
		enable_irq(MACEISA_PAR_CTXA_IRQ);
		enable_irq(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 1;
	}

	/* Prepare DMA pointers */
	/* NOTE(review): the dma_map_single() result is not checked for a
	 * mapping error here — confirm against this tree's DMA API. */
	parport_ip32_dma.dir = dir;
	parport_ip32_dma.buf = dma_map_single(NULL, addr, count, dir);
	parport_ip32_dma.len = count;
	parport_ip32_dma.next = parport_ip32_dma.buf;
	parport_ip32_dma.left = parport_ip32_dma.len;
	parport_ip32_dma.ctx = 0;

	/* Setup DMA direction and first two contexts */
	ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	/* Single transfer should not cross a 4K page boundary */
	limit = MACEPAR_CONTEXT_DATA_BOUND -
		(parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
	parport_ip32_dma_setup_context(limit);
	parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);

	/* Real start of DMA transfer */
	ctrl |= MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	return 0;
}

/**
 * parport_ip32_dma_stop - ends a running DMA transfer
 *
 * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
 * correctly balanced.
*/
static void parport_ip32_dma_stop(void)
{
	u64 ctx_a;
	u64 ctx_b;
	u64 ctrl;
	u64 diag;
	size_t res[2];	/* {[0] = res_a, [1] = res_b} */

	pr_trace(NULL, "()");

	/* Disable IRQs */
	spin_lock_irq(&parport_ip32_dma.lock);
	if (parport_ip32_dma.irq_on) {
		pr_debug(PPIP32 "IRQ off (stop)\n");
		disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
		disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
		parport_ip32_dma.irq_on = 0;
	}
	spin_unlock_irq(&parport_ip32_dma.lock);
	/* Force IRQ synchronization, even if the IRQs were disabled
	 * elsewhere. */
	synchronize_irq(MACEISA_PAR_CTXA_IRQ);
	synchronize_irq(MACEISA_PAR_CTXB_IRQ);

	/* Stop DMA transfer */
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);

	/* Adjust residue (parport_ip32_dma.left) */
	/* Each still-valid context contributes its remaining count (the
	 * DATALEN field stores count-1, hence the "1 +").  If the engine
	 * is mid-transfer, the diagnostic counter overrides the entry for
	 * whichever context is in use. */
	ctx_a = readq(&mace->perif.ctrl.parport.context_a);
	ctx_b = readq(&mace->perif.ctrl.parport.context_b);
	ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
	diag = readq(&mace->perif.ctrl.parport.diagnostic);
	res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
		1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
		1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
		     MACEPAR_CONTEXT_DATALEN_SHIFT) :
		0;
	if (diag & MACEPAR_DIAG_DMACTIVE)
		res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
			1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
			     MACEPAR_DIAG_CTRSHIFT);
	parport_ip32_dma.left += res[0] + res[1];

	/* Reset DMA controller, and re-enable IRQs */
	ctrl = MACEPAR_CTLSTAT_RESET;
	writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
	pr_debug(PPIP32 "IRQ on (stop)\n");
	enable_irq(MACEISA_PAR_CTXA_IRQ);
	enable_irq(MACEISA_PAR_CTXB_IRQ);
	parport_ip32_dma.irq_on = 1;

	dma_unmap_single(NULL, parport_ip32_dma.buf,
			 parport_ip32_dma.len, parport_ip32_dma.dir);
}

/**
 * parport_ip32_dma_get_residue - get residue from last DMA transfer
 *
 * Returns the number of bytes remaining from last DMA transfer.
 */
static inline size_t parport_ip32_dma_get_residue(void)
{
	return parport_ip32_dma.left;
}

/**
 * parport_ip32_dma_register - initialize DMA engine
 *
 * Returns zero for success.
 */
static int parport_ip32_dma_register(void)
{
	int err;

	spin_lock_init(&parport_ip32_dma.lock);
	/* The CTXA/CTXB IRQs start out enabled once requested below */
	parport_ip32_dma.irq_on = 1;

	/* Reset DMA controller */
	writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);

	/* Request IRQs */
	err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_a;
	err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_b;
#if DEBUG_PARPORT_IP32
	/* FIXME - what is this IRQ for? */
	err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
			  0, "parport_ip32", NULL);
	if (err)
		goto fail_merr;
#endif
	return 0;

	/* goto-based unwind: release IRQs in reverse order of acquisition */
#if DEBUG_PARPORT_IP32
fail_merr:
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
#endif
fail_b:
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
fail_a:
	return err;
}

/**
 * parport_ip32_dma_unregister - release and free resources for DMA engine
 */
static void parport_ip32_dma_unregister(void)
{
#if DEBUG_PARPORT_IP32
	free_irq(MACEISA_PAR_MERR_IRQ, NULL);
#endif
	free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
	free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
}

/*--- Interrupt handlers and associates --------------------------------*/

/**
 * parport_ip32_wakeup - wakes up code waiting for an interrupt
 * @p: pointer to &struct parport
 */
static inline void parport_ip32_wakeup(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	complete(&priv->irq_complete);
}

/**
 * parport_ip32_interrupt - interrupt handler
 * @irq: interrupt number
 * @dev_id: pointer to &struct parport
 *
 * Caught interrupts are forwarded to the upper parport layer if IRQ_mode is
 * %PARPORT_IP32_IRQ_FWD.
*/ static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id) { struct parport * const p = dev_id; struct parport_ip32_private * const priv = p->physport->private_data; enum parport_ip32_irq_mode irq_mode = priv->irq_mode; switch (irq_mode) { case PARPORT_IP32_IRQ_FWD: return parport_irq_handler(irq, dev_id); case PARPORT_IP32_IRQ_HERE: parport_ip32_wakeup(p); break; } return IRQ_HANDLED; } /*--- Some utility function to manipulate ECR register -----------------*/ /** * parport_ip32_read_econtrol - read contents of the ECR register * @p: pointer to &struct parport */ static inline unsigned int parport_ip32_read_econtrol(struct parport *p) { struct parport_ip32_private * const priv = p->physport->private_data; return readb(priv->regs.ecr); } /** * parport_ip32_write_econtrol - write new contents to the ECR register * @p: pointer to &struct parport * @c: new value to write */ static inline void parport_ip32_write_econtrol(struct parport *p, unsigned int c) { struct parport_ip32_private * const priv = p->physport->private_data; writeb(c, priv->regs.ecr); } /** * parport_ip32_frob_econtrol - change bits from the ECR register * @p: pointer to &struct parport * @mask: bit mask of bits to change * @val: new value for changed bits * * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits * in @val, and write the result to the ECR. */ static inline void parport_ip32_frob_econtrol(struct parport *p, unsigned int mask, unsigned int val) { unsigned int c; c = (parport_ip32_read_econtrol(p) & ~mask) ^ val; parport_ip32_write_econtrol(p, c); } /** * parport_ip32_set_mode - change mode of ECP port * @p: pointer to &struct parport * @mode: new mode to write in ECR * * ECR is reset in a sane state (interrupts and DMA disabled), and placed in * mode @mode. Go through PS2 mode if needed. 
 */
static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
{
	unsigned int omode;

	mode &= ECR_MODE_MASK;
	omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;

	/* Direct transitions are only legal to/from SPP and PS2; anything
	 * else must be staged through PS2 mode first. */
	if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
	      || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
		/* We have to go through PS2 mode */
		unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
		parport_ip32_write_econtrol(p, ecr);
	}
	parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
}

/*--- Basic functions needed for parport -------------------------------*/

/**
 * parport_ip32_read_data - return current contents of the DATA register
 * @p: pointer to &struct parport
 */
static inline unsigned char parport_ip32_read_data(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return readb(priv->regs.data);
}

/**
 * parport_ip32_write_data - set new contents for the DATA register
 * @p: pointer to &struct parport
 * @d: new value to write
 */
static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	writeb(d, priv->regs.data);
}

/**
 * parport_ip32_read_status - return current contents of the DSR register
 * @p: pointer to &struct parport
 */
static inline unsigned char parport_ip32_read_status(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return readb(priv->regs.dsr);
}

/**
 * __parport_ip32_read_control - return cached contents of the DCR register
 * @p: pointer to &struct parport
 */
static inline unsigned int __parport_ip32_read_control(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return priv->dcr_cache; /* use soft copy */
}

/**
 * __parport_ip32_write_control - set new contents for the DCR register
 * @p: pointer to &struct parport
 * @c: new value to write
 */
static inline void __parport_ip32_write_control(struct parport *p,
						unsigned int c)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
	c &= priv->dcr_writable; /* only writable bits */
	writeb(c, priv->regs.dcr);
	priv->dcr_cache = c;		/* update soft copy */
}

/**
 * __parport_ip32_frob_control - change bits from the DCR register
 * @p: pointer to &struct parport
 * @mask: bit mask of bits to change
 * @val: new value for changed bits
 *
 * This is equivalent to read from the DCR, mask out the bits in @mask,
 * exclusive-or with the bits in @val, and write the result to the DCR.
 * Actually, the cached contents of the DCR is used.
 */
static inline void __parport_ip32_frob_control(struct parport *p,
					       unsigned int mask,
					       unsigned int val)
{
	unsigned int c;
	c = (__parport_ip32_read_control(p) & ~mask) ^ val;
	__parport_ip32_write_control(p, c);
}

/**
 * parport_ip32_read_control - return cached contents of the DCR register
 * @p: pointer to &struct parport
 *
 * The return value is masked so as to only return the value of %DCR_STROBE,
 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
 */
static inline unsigned char parport_ip32_read_control(struct parport *p)
{
	const unsigned int rm =
		DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
	return __parport_ip32_read_control(p) & rm;
}

/**
 * parport_ip32_write_control - set new contents for the DCR register
 * @p: pointer to &struct parport
 * @c: new value to write
 *
 * The value is masked so as to only change the value of %DCR_STROBE,
 * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
*/ static inline void parport_ip32_write_control(struct parport *p, unsigned char c) { const unsigned int wm = DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT; CHECK_EXTRA_BITS(p, c, wm); __parport_ip32_frob_control(p, wm, c & wm); } /** * parport_ip32_frob_control - change bits from the DCR register * @p: pointer to &struct parport * @mask: bit mask of bits to change * @val: new value for changed bits * * This differs from __parport_ip32_frob_control() in that it only allows to * change the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT. */ static inline unsigned char parport_ip32_frob_control(struct parport *p, unsigned char mask, unsigned char val) { const unsigned int wm = DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT; CHECK_EXTRA_BITS(p, mask, wm); CHECK_EXTRA_BITS(p, val, wm); __parport_ip32_frob_control(p, mask & wm, val & wm); return parport_ip32_read_control(p); } /** * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK * @p: pointer to &struct parport */ static inline void parport_ip32_disable_irq(struct parport *p) { __parport_ip32_frob_control(p, DCR_IRQ, 0); } /** * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK * @p: pointer to &struct parport */ static inline void parport_ip32_enable_irq(struct parport *p) { __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ); } /** * parport_ip32_data_forward - enable host-to-peripheral communications * @p: pointer to &struct parport * * Enable the data line drivers, for 8-bit host-to-peripheral communications. */ static inline void parport_ip32_data_forward(struct parport *p) { __parport_ip32_frob_control(p, DCR_DIR, 0); } /** * parport_ip32_data_reverse - enable peripheral-to-host communications * @p: pointer to &struct parport * * Place the data bus in a high impedance state, if @p->modes has the * PARPORT_MODE_TRISTATE bit set. 
*/ static inline void parport_ip32_data_reverse(struct parport *p) { __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR); } /** * parport_ip32_init_state - for core parport code * @dev: pointer to &struct pardevice * @s: pointer to &struct parport_state to initialize */ static void parport_ip32_init_state(struct pardevice *dev, struct parport_state *s) { s->u.ip32.dcr = DCR_SELECT | DCR_nINIT; s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR; } /** * parport_ip32_save_state - for core parport code * @p: pointer to &struct parport * @s: pointer to &struct parport_state to save state to */ static void parport_ip32_save_state(struct parport *p, struct parport_state *s) { s->u.ip32.dcr = __parport_ip32_read_control(p); s->u.ip32.ecr = parport_ip32_read_econtrol(p); } /** * parport_ip32_restore_state - for core parport code * @p: pointer to &struct parport * @s: pointer to &struct parport_state to restore state from */ static void parport_ip32_restore_state(struct parport *p, struct parport_state *s) { parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK); parport_ip32_write_econtrol(p, s->u.ip32.ecr); __parport_ip32_write_control(p, s->u.ip32.dcr); } /*--- EPP mode functions -----------------------------------------------*/ /** * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode * @p: pointer to &struct parport * * Returns 1 if the Timeout bit is clear, and 0 otherwise. 
 */
static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int cleared;

	if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
		cleared = 1;
	else {
		unsigned int r;
		/* To clear timeout some chips require double read */
		parport_ip32_read_status(p);
		r = parport_ip32_read_status(p);
		/* Some reset by writing 1 */
		writeb(r | DSR_TIMEOUT, priv->regs.dsr);
		/* Others by writing 0 */
		writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
		r = parport_ip32_read_status(p);
		cleared = !(r & DSR_TIMEOUT);
	}

	pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
	return cleared;
}

/**
 * parport_ip32_epp_read - generic EPP read function
 * @eppreg: I/O register to read from
 * @p: pointer to &struct parport
 * @buf: buffer to store read data
 * @len: length of buffer @buf
 * @flags: may be PARPORT_EPP_FAST
 */
static size_t parport_ip32_epp_read(void __iomem *eppreg,
				    struct parport *p, void *buf,
				    size_t len, int flags)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	size_t got;

	parport_ip32_set_mode(p, ECR_MODE_EPP);
	parport_ip32_data_reverse(p);
	parport_ip32_write_control(p, DCR_nINIT);
	if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
		/* Fast path: burst-read the whole block, then check for a
		 * timeout once at the end (matches parport_pc behaviour). */
		readsb(eppreg, buf, len);
		if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
			parport_ip32_clear_epp_timeout(p);
			return -EIO;
		}
		got = len;
	} else {
		u8 *bufp = buf;
		for (got = 0; got < len; got++) {
			*bufp++ = readb(eppreg);
			if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
				parport_ip32_clear_epp_timeout(p);
				break;
			}
		}
	}
	parport_ip32_data_forward(p);
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	return got;
}

/**
 * parport_ip32_epp_write - generic EPP write function
 * @eppreg: I/O register to write to
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: may be PARPORT_EPP_FAST
 */
static size_t parport_ip32_epp_write(void __iomem *eppreg,
				     struct parport *p, const void *buf,
				     size_t len, int flags)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	size_t written;

	parport_ip32_set_mode(p, ECR_MODE_EPP);
	parport_ip32_data_forward(p);
	parport_ip32_write_control(p, DCR_nINIT);
	if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
		writesb(eppreg, buf, len);
		if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
			parport_ip32_clear_epp_timeout(p);
			return -EIO;
		}
		written = len;
	} else {
		const u8 *bufp = buf;
		for (written = 0; written < len; written++) {
			writeb(*bufp++, eppreg);
			if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
				parport_ip32_clear_epp_timeout(p);
				break;
			}
		}
	}
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	return written;
}

/**
 * parport_ip32_epp_read_data - read a block of data in EPP mode
 * @p: pointer to &struct parport
 * @buf: buffer to store read data
 * @len: length of buffer @buf
 * @flags: may be PARPORT_EPP_FAST
 */
static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
					 size_t len, int flags)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
}

/**
 * parport_ip32_epp_write_data - write a block of data in EPP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: may be PARPORT_EPP_FAST
 */
static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
					  size_t len, int flags)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
}

/**
 * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
 * @p: pointer to &struct parport
 * @buf: buffer to store read data
 * @len: length of buffer @buf
 * @flags: may be PARPORT_EPP_FAST
 */
static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
					 size_t len, int flags)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
}

/**
 * parport_ip32_epp_write_addr - write a block of addresses in EPP
mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: may be PARPORT_EPP_FAST
 */
static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
					  size_t len, int flags)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
}

/*--- ECP mode functions (FIFO) ----------------------------------------*/

/**
 * parport_ip32_fifo_wait_break - check if the waiting function should return
 * @p: pointer to &struct parport
 * @expire: timeout expiring date, in jiffies
 *
 * parport_ip32_fifo_wait_break() checks if the waiting function should return
 * immediately or not. The break conditions are:
 * - expired timeout;
 * - a pending signal;
 * - nFault asserted low.
 * This function also calls cond_resched().
 */
static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
						 unsigned long expire)
{
	cond_resched();
	if (time_after(jiffies, expire)) {
		pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
		return 1;
	}
	if (signal_pending(current)) {
		pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
		return 1;
	}
	if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
		pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
		return 1;
	}
	return 0;
}

/**
 * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
 * @p: pointer to &struct parport
 *
 * Returns the number of bytes that can safely be written in the FIFO. A
 * return value of zero means that the calling function should terminate as
 * fast as possible.
 */
static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long expire;
	unsigned int count;
	unsigned int ecr;

	expire = jiffies + physport->cad->timeout;
	count = 0;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;

		/* Check FIFO state.  We do nothing when the FIFO is nor full,
		 * nor empty.  It appears that the FIFO full bit is not always
		 * reliable, the FIFO state is sometimes wrongly reported, and
		 * the chip gets confused if we give it another byte. */
		ecr = parport_ip32_read_econtrol(p);
		if (ecr & ECR_F_EMPTY) {
			/* FIFO is empty, fill it up */
			count = priv->fifo_depth;
			break;
		}

		/* Wait a moment... */
		udelay(FIFO_POLLING_INTERVAL);
	} /* while (1) */

	return count;
}

/**
 * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
 * @p: pointer to &struct parport
 *
 * Returns the number of bytes that can safely be written in the FIFO. A
 * return value of zero means that the calling function should terminate as
 * fast as possible.
 */
static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
{
	/* Warn only once per boot about a missed serviceIntr interrupt */
	static unsigned int lost_interrupt = 0;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	unsigned int count;
	unsigned int ecr;

	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	count = 0;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;

		/* Initialize mutex used to take interrupts into account */
		INIT_COMPLETION(priv->irq_complete);

		/* Enable serviceIntr */
		parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

		/* Enabling serviceIntr while the FIFO is empty does not
		 * always generate an interrupt, so check for emptiness
		 * now. */
		ecr = parport_ip32_read_econtrol(p);
		if (!(ecr & ECR_F_EMPTY)) {
			/* FIFO is not empty: wait for an interrupt or a
			 * timeout to occur */
			wait_for_completion_interruptible_timeout(
				&priv->irq_complete, nfault_timeout);
			ecr = parport_ip32_read_econtrol(p);
			if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
			    && !lost_interrupt) {
				printk(KERN_WARNING PPIP32
				       "%s: lost interrupt in %s\n",
				       p->name, __func__);
				lost_interrupt = 1;
			}
		}

		/* Disable serviceIntr */
		parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);

		/* Check FIFO state */
		if (ecr & ECR_F_EMPTY) {
			/* FIFO is empty, fill it up */
			count = priv->fifo_depth;
			break;
		} else if (ecr & ECR_SERVINTR) {
			/* FIFO is not empty, but we know that can safely push
			 * writeIntrThreshold bytes into it */
			count = priv->writeIntrThreshold;
			break;
		}
		/* FIFO is not empty, and we did not get any interrupt.
		 * Either it's time to check for nFault, or a signal is
		 * pending.  This is verified in
		 * parport_ip32_fifo_wait_break(), so we continue the loop. */
	} /* while (1) */

	return count;
}

/**
 * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO to write the contents of the buffer @buf into the parallel port
 * FIFO. Returns the number of bytes that were actually written. It can work
 * with or without the help of interrupts. The parallel port must be
 * correctly initialized before calling parport_ip32_fifo_write_block_pio().
 */
static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	const u8 *bufp = buf;
	size_t left = len;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	while (left > 0) {
		unsigned int count;

		count = (p->irq == PARPORT_IRQ_NONE) ?
			parport_ip32_fwp_wait_polling(p) :
			parport_ip32_fwp_wait_interrupt(p);
		if (count == 0)
			break;	/* Transmission should be stopped */
		if (count > left)
			count = left;
		if (count == 1) {
			writeb(*bufp, priv->regs.fifo);
			bufp++, left--;
		} else {
			writesb(priv->regs.fifo, bufp, count);
			bufp += count, left -= count;
		}
	}

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return len - left;
}

/**
 * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses DMA to write the contents of the buffer @buf into the parallel port
 * FIFO. Returns the number of bytes that were actually written. The
 * parallel port must be correctly initialized before calling
 * parport_ip32_fifo_write_block_dma().
 */
static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	size_t written;
	unsigned int ecr;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	parport_ip32_dma_start(DMA_TO_DEVICE, (void *)buf, len);
	INIT_COMPLETION(priv->irq_complete);
	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);

	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		wait_for_completion_interruptible_timeout(&priv->irq_complete,
							  nfault_timeout);
		ecr = parport_ip32_read_econtrol(p);
		if (ecr & ECR_SERVINTR)
			break;	/* DMA transfer just finished */
	}
	parport_ip32_dma_stop();
	written = len - parport_ip32_dma_get_residue();

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return written;
}

/**
 * parport_ip32_fifo_write_block - write a block of data
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO or DMA to write the
contents of the buffer @buf into the parallel
 * port FIFO. Returns the number of bytes that were actually written.
 */
static size_t parport_ip32_fifo_write_block(struct parport *p,
					    const void *buf, size_t len)
{
	size_t written = 0;
	if (len)
		/* FIXME - Maybe some threshold value should be set for @len
		 * under which we revert to PIO mode? */
		written = (p->modes & PARPORT_MODE_DMA) ?
			parport_ip32_fifo_write_block_dma(p, buf, len) :
			parport_ip32_fifo_write_block_pio(p, buf, len);
	return written;
}

/**
 * parport_ip32_drain_fifo - wait for FIFO to empty
 * @p: pointer to &struct parport
 * @timeout: timeout, in jiffies
 *
 * This function waits for FIFO to empty. It returns 1 when FIFO is empty, or
 * 0 if the timeout @timeout is reached before, or if a signal is pending.
 */
static unsigned int parport_ip32_drain_fifo(struct parport *p,
					    unsigned long timeout)
{
	unsigned long expire = jiffies + timeout;
	unsigned int polling_interval;
	unsigned int counter;

	/* Busy wait for approx. 200us */
	for (counter = 0; counter < 40; counter++) {
		if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
			break;
		if (time_after(jiffies, expire))
			break;
		if (signal_pending(current))
			break;
		udelay(5);
	}
	/* Poll slowly.  Polling interval starts with 1 millisecond, and is
	 * increased exponentially until 128.  */
	polling_interval = 1; /* msecs */
	while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
		if (time_after_eq(jiffies, expire))
			break;
		msleep_interruptible(polling_interval);
		if (signal_pending(current))
			break;
		if (polling_interval < 128)
			polling_interval *= 2;
	}

	return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
}

/**
 * parport_ip32_get_fifo_residue - reset FIFO
 * @p: pointer to &struct parport
 * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
 *
 * This function resets FIFO, and returns the number of bytes remaining in it.
 */
static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
						  unsigned int mode)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int residue;
	unsigned int cnfga;

	/* FIXME - We are missing one byte if the printer is off-line.  I
	 * don't know how to detect this.  It looks that the full bit is not
	 * always reliable.  For the moment, the problem is avoided in most
	 * cases by testing for BUSY in parport_ip32_compat_write_data(). */
	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
		residue = 0;
	else {
		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);

		/* Stop all transfers.
		 *
		 * Microsoft's document instructs to drive DCR_STROBE to 0,
		 * but it doesn't work (at least in Compatibility mode, not
		 * tested in ECP mode).  Switching directly to Test mode (as
		 * in parport_pc) is not an option: it does confuse the port,
		 * ECP service interrupts are no more working after that.  A
		 * hard reset is then needed to revert to a sane state.
		 *
		 * Let's hope that the FIFO is really stuck and that the
		 * peripheral doesn't wake up now.
		 */
		parport_ip32_frob_control(p, DCR_STROBE, 0);

		/* Fill up FIFO: the residue is the free space left before
		 * the full bit is raised. */
		for (residue = priv->fifo_depth; residue > 0; residue--) {
			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
				break;
			writeb(0x00, priv->regs.fifo);
		}
	}
	if (residue)
		pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
			  p->name, residue,
			  (residue == 1) ? " was" : "s were");

	/* Now reset the FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);

	/* Host recovery for ECP mode */
	if (mode == ECR_MODE_ECP) {
		parport_ip32_data_reverse(p);
		parport_ip32_frob_control(p, DCR_nINIT, 0);
		if (parport_wait_peripheral(p, DSR_PERROR, 0))
			pr_debug1(PPIP32 "%s: PEerror timeout 1 in %s\n",
				  p->name, __func__);
		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
			pr_debug1(PPIP32 "%s: PEerror timeout 2 in %s\n",
				  p->name, __func__);
	}

	/* Adjust residue if needed */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	cnfga = readb(priv->regs.cnfgA);
	if (!(cnfga & CNFGA_nBYTEINTRANS)) {
		pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
			  p->name, cnfga);
		pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
			  p->name);
		residue++;
	}

	/* Don't care about partial PWords since we do not support
	 * PWord != 1 byte. */

	/* Back to forward PS2 mode. */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);

	return residue;
}

/**
 * parport_ip32_compat_write_data - write a block of data in SPP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_compat_write_data(struct parport *p,
					     const void *buf, size_t len,
					     int flags)
{
	/* Warn only once per boot when the peripheral is not ready */
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_write_compat(p, buf, len, flags);

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_PPF);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				       DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid to flood the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait FIFO to empty.  Timeout is proportional to FIFO_depth.  */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}

/*
 * FIXME - Insert here parport_ip32_ecp_read_data().
 */

/**
 * parport_ip32_ecp_write_data - write a block of data in ECP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_ecp_write_data(struct parport *p,
					  const void *buf, size_t len,
					  int flags)
{
	/* Warn only once per boot when the peripheral is not ready */
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_ecp_write_data(p, buf, len, flags);

	/* Negotiate to forward mode if necessary. */
	if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
		/* Event 47: Set nInit high. */
		parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
					     DCR_nINIT | DCR_AUTOFD);

		/* Event 49: PError goes high. */
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
			printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s",
			       p->name, __func__);
			physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
			return 0;
		}
	}

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_ECP);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				       DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid to flood the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait FIFO to empty.  Timeout is proportional to FIFO_depth.  */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}

/*
 * FIXME - Insert here parport_ip32_ecp_write_addr().
 */

/*--- Default parport operations ---------------------------------------*/

/* NOTE(review): this table is __initdata — presumably it is copied into a
 * non-init structure during port probing before init memory is freed; TODO
 * confirm against parport_ip32_probe_port(). */
static __initdata struct parport_operations parport_ip32_ops = {
	.write_data		= parport_ip32_write_data,
	.read_data		= parport_ip32_read_data,

	.write_control		= parport_ip32_write_control,
	.read_control		= parport_ip32_read_control,
	.frob_control		= parport_ip32_frob_control,

	.read_status		= parport_ip32_read_status,

	.enable_irq		= parport_ip32_enable_irq,
	.disable_irq		= parport_ip32_disable_irq,

	.data_forward		= parport_ip32_data_forward,
	.data_reverse		= parport_ip32_data_reverse,

	.init_state		= parport_ip32_init_state,
	.save_state		= parport_ip32_save_state,
	.restore_state		= parport_ip32_restore_state,

	/* IEEE 1284 generic fallbacks; hardware-assisted variants may be
	 * installed at probe time depending on detected capabilities. */
	.epp_write_data		= parport_ieee1284_epp_write_data,
	.epp_read_data		= parport_ieee1284_epp_read_data,
	.epp_write_addr		= parport_ieee1284_epp_write_addr,
	.epp_read_addr		= parport_ieee1284_epp_read_addr,

	.ecp_write_data		= parport_ieee1284_ecp_write_data,
	.ecp_read_data		= parport_ieee1284_ecp_read_data,
	.ecp_write_addr		= parport_ieee1284_ecp_write_addr,

	.compat_write_data	= parport_ieee1284_write_compat,
	.nibble_read_data	= parport_ieee1284_read_nibble,
	.byte_read_data		= parport_ieee1284_read_byte,

	.owner			= THIS_MODULE,
};

/*--- Device detection -------------------------------------------------*/

/**
 * parport_ip32_ecp_supported - check for an ECP port
 * @p: pointer to the &parport structure
 *
 * Returns 1 if an ECP port is found, and 0 otherwise. This function actually
 * checks if an Extended Control Register seems to be present. On successful
 * return, the port is placed in SPP mode.
*/ static __init unsigned int parport_ip32_ecp_supported(struct parport *p) { struct parport_ip32_private * const priv = p->physport->private_data; unsigned int ecr; ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR; writeb(ecr, priv->regs.ecr); if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY)) goto fail; pr_probe(p, "Found working ECR register\n"); parport_ip32_set_mode(p, ECR_MODE_SPP); parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT); return 1; fail: pr_probe(p, "ECR register not found\n"); return 0; } /** * parport_ip32_fifo_supported - check for FIFO parameters * @p: pointer to the &parport structure * * Check for FIFO parameters of an Extended Capabilities Port. Returns 1 on * success, and 0 otherwise. Adjust FIFO parameters in the parport structure. * On return, the port is placed in SPP mode. */ static __init unsigned int parport_ip32_fifo_supported(struct parport *p) { struct parport_ip32_private * const priv = p->physport->private_data; unsigned int configa, configb; unsigned int pword; unsigned int i; /* Configuration mode */ parport_ip32_set_mode(p, ECR_MODE_CFG); configa = readb(priv->regs.cnfgA); configb = readb(priv->regs.cnfgB); /* Find out PWord size */ switch (configa & CNFGA_ID_MASK) { case CNFGA_ID_8: pword = 1; break; case CNFGA_ID_16: pword = 2; break; case CNFGA_ID_32: pword = 4; break; default: pr_probe(p, "Unknown implementation ID: 0x%0x\n", (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT); goto fail; break; } if (pword != 1) { pr_probe(p, "Unsupported PWord size: %u\n", pword); goto fail; } priv->pword = pword; pr_probe(p, "PWord is %u bits\n", 8 * priv->pword); /* Check for compression support */ writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB); if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS) pr_probe(p, "Hardware compression detected (unsupported)\n"); writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB); /* Reset FIFO and go in test mode (no interrupt, no DMA) */ parport_ip32_set_mode(p, ECR_MODE_TST); /* FIFO must be empty now */ if 
(!(readb(priv->regs.ecr) & ECR_F_EMPTY)) { pr_probe(p, "FIFO not reset\n"); goto fail; } /* Find out FIFO depth. */ priv->fifo_depth = 0; for (i = 0; i < 1024; i++) { if (readb(priv->regs.ecr) & ECR_F_FULL) { /* FIFO full */ priv->fifo_depth = i; break; } writeb((u8)i, priv->regs.fifo); } if (i >= 1024) { pr_probe(p, "Can't fill FIFO\n"); goto fail; } if (!priv->fifo_depth) { pr_probe(p, "Can't get FIFO depth\n"); goto fail; } pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth); /* Enable interrupts */ parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0); /* Find out writeIntrThreshold: number of PWords we know we can write * if we get an interrupt. */ priv->writeIntrThreshold = 0; for (i = 0; i < priv->fifo_depth; i++) { if (readb(priv->regs.fifo) != (u8)i) { pr_probe(p, "Invalid data in FIFO\n"); goto fail; } if (!priv->writeIntrThreshold && readb(priv->regs.ecr) & ECR_SERVINTR) /* writeIntrThreshold reached */ priv->writeIntrThreshold = i + 1; if (i + 1 < priv->fifo_depth && readb(priv->regs.ecr) & ECR_F_EMPTY) { /* FIFO empty before the last byte? */ pr_probe(p, "Data lost in FIFO\n"); goto fail; } } if (!priv->writeIntrThreshold) { pr_probe(p, "Can't get writeIntrThreshold\n"); goto fail; } pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold); /* FIFO must be empty now */ if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) { pr_probe(p, "Can't empty FIFO\n"); goto fail; } /* Reset FIFO */ parport_ip32_set_mode(p, ECR_MODE_PS2); /* Set reverse direction (must be in PS2 mode) */ parport_ip32_data_reverse(p); /* Test FIFO, no interrupt, no DMA */ parport_ip32_set_mode(p, ECR_MODE_TST); /* Enable interrupts */ parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0); /* Find out readIntrThreshold: number of PWords we can read if we get * an interrupt. 
*/ priv->readIntrThreshold = 0; for (i = 0; i < priv->fifo_depth; i++) { writeb(0xaa, priv->regs.fifo); if (readb(priv->regs.ecr) & ECR_SERVINTR) { /* readIntrThreshold reached */ priv->readIntrThreshold = i + 1; break; } } if (!priv->readIntrThreshold) { pr_probe(p, "Can't get readIntrThreshold\n"); goto fail; } pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold); /* Reset ECR */ parport_ip32_set_mode(p, ECR_MODE_PS2); parport_ip32_data_forward(p); parport_ip32_set_mode(p, ECR_MODE_SPP); return 1; fail: priv->fifo_depth = 0; parport_ip32_set_mode(p, ECR_MODE_SPP); return 0; } /*--- Initialization code ----------------------------------------------*/ /** * parport_ip32_make_isa_registers - compute (ISA) register addresses * @regs: pointer to &struct parport_ip32_regs to fill * @base: base address of standard and EPP registers * @base_hi: base address of ECP registers * @regshift: how much to shift register offset by * * Compute register addresses, according to the ISA standard. The addresses * of the standard and EPP registers are computed from address @base. The * addresses of the ECP registers are computed from address @base_hi. */ static void __init parport_ip32_make_isa_registers(struct parport_ip32_regs *regs, void __iomem *base, void __iomem *base_hi, unsigned int regshift) { #define r_base(offset) ((u8 __iomem *)base + ((offset) << regshift)) #define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift)) *regs = (struct parport_ip32_regs){ .data = r_base(0), .dsr = r_base(1), .dcr = r_base(2), .eppAddr = r_base(3), .eppData0 = r_base(4), .eppData1 = r_base(5), .eppData2 = r_base(6), .eppData3 = r_base(7), .ecpAFifo = r_base(0), .fifo = r_base_hi(0), .cnfgA = r_base_hi(0), .cnfgB = r_base_hi(1), .ecr = r_base_hi(2) }; #undef r_base_hi #undef r_base } /** * parport_ip32_probe_port - probe and register IP32 built-in parallel port * * Returns the new allocated &parport structure. 
On error, an error code is * encoded in return value with the ERR_PTR function. */ static __init struct parport *parport_ip32_probe_port(void) { struct parport_ip32_regs regs; struct parport_ip32_private *priv = NULL; struct parport_operations *ops = NULL; struct parport *p = NULL; int err; parport_ip32_make_isa_registers(&regs, &mace->isa.parallel, &mace->isa.ecp1284, 8 /* regshift */); ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL); priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL); p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops); if (ops == NULL || priv == NULL || p == NULL) { err = -ENOMEM; goto fail; } p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel); p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284); p->private_data = priv; *ops = parport_ip32_ops; *priv = (struct parport_ip32_private){ .regs = regs, .dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT | DCR_AUTOFD | DCR_STROBE, .irq_mode = PARPORT_IP32_IRQ_FWD, }; init_completion(&priv->irq_complete); /* Probe port. */ if (!parport_ip32_ecp_supported(p)) { err = -ENODEV; goto fail; } parport_ip32_dump_state(p, "begin init", 0); /* We found what looks like a working ECR register. Simply assume * that all modes are correctly supported. Enable basic modes. */ p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT; p->modes |= PARPORT_MODE_TRISTATE; if (!parport_ip32_fifo_supported(p)) { printk(KERN_WARNING PPIP32 "%s: error: FIFO disabled\n", p->name); /* Disable hardware modes depending on a working FIFO. */ features &= ~PARPORT_IP32_ENABLE_SPP; features &= ~PARPORT_IP32_ENABLE_ECP; /* DMA is not needed if FIFO is not supported. */ features &= ~PARPORT_IP32_ENABLE_DMA; } /* Request IRQ */ if (features & PARPORT_IP32_ENABLE_IRQ) { int irq = MACEISA_PARALLEL_IRQ; if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) { printk(KERN_WARNING PPIP32 "%s: error: IRQ disabled\n", p->name); /* DMA cannot work without interrupts. 
*/ features &= ~PARPORT_IP32_ENABLE_DMA; } else { pr_probe(p, "Interrupt support enabled\n"); p->irq = irq; priv->dcr_writable |= DCR_IRQ; } } /* Allocate DMA resources */ if (features & PARPORT_IP32_ENABLE_DMA) { if (parport_ip32_dma_register()) printk(KERN_WARNING PPIP32 "%s: error: DMA disabled\n", p->name); else { pr_probe(p, "DMA support enabled\n"); p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */ p->modes |= PARPORT_MODE_DMA; } } if (features & PARPORT_IP32_ENABLE_SPP) { /* Enable compatibility FIFO mode */ p->ops->compat_write_data = parport_ip32_compat_write_data; p->modes |= PARPORT_MODE_COMPAT; pr_probe(p, "Hardware support for SPP mode enabled\n"); } if (features & PARPORT_IP32_ENABLE_EPP) { /* Set up access functions to use EPP hardware. */ p->ops->epp_read_data = parport_ip32_epp_read_data; p->ops->epp_write_data = parport_ip32_epp_write_data; p->ops->epp_read_addr = parport_ip32_epp_read_addr; p->ops->epp_write_addr = parport_ip32_epp_write_addr; p->modes |= PARPORT_MODE_EPP; pr_probe(p, "Hardware support for EPP mode enabled\n"); } if (features & PARPORT_IP32_ENABLE_ECP) { /* Enable ECP FIFO mode */ p->ops->ecp_write_data = parport_ip32_ecp_write_data; /* FIXME - not implemented */ /* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */ /* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */ p->modes |= PARPORT_MODE_ECP; pr_probe(p, "Hardware support for ECP mode enabled\n"); } /* Initialize the port with sensible values */ parport_ip32_set_mode(p, ECR_MODE_PS2); parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT); parport_ip32_data_forward(p); parport_ip32_disable_irq(p); parport_ip32_write_data(p, 0x00); parport_ip32_dump_state(p, "end init", 0); /* Print out what we found */ printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)", p->name, p->base, p->base_hi); if (p->irq != PARPORT_IRQ_NONE) printk(", irq %d", p->irq); printk(" ["); #define printmode(x) if (p->modes & PARPORT_MODE_##x) \ printk("%s%s", f++ ? 
"," : "", #x) { unsigned int f = 0; printmode(PCSPP); printmode(TRISTATE); printmode(COMPAT); printmode(EPP); printmode(ECP); printmode(DMA); } #undef printmode printk("]\n"); parport_announce_port(p); return p; fail: if (p) parport_put_port(p); kfree(priv); kfree(ops); return ERR_PTR(err); } /** * parport_ip32_unregister_port - unregister a parallel port * @p: pointer to the &struct parport * * Unregisters a parallel port and free previously allocated resources * (memory, IRQ, ...). */ static __exit void parport_ip32_unregister_port(struct parport *p) { struct parport_ip32_private * const priv = p->physport->private_data; struct parport_operations *ops = p->ops; parport_remove_port(p); if (p->modes & PARPORT_MODE_DMA) parport_ip32_dma_unregister(); if (p->irq != PARPORT_IRQ_NONE) free_irq(p->irq, p); parport_put_port(p); kfree(priv); kfree(ops); } /** * parport_ip32_init - module initialization function */ static int __init parport_ip32_init(void) { pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n"); this_port = parport_ip32_probe_port(); return IS_ERR(this_port) ? 
PTR_ERR(this_port) : 0; } /** * parport_ip32_exit - module termination function */ static void __exit parport_ip32_exit(void) { parport_ip32_unregister_port(this_port); } /*--- Module stuff -----------------------------------------------------*/ MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>"); MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.6"); /* update in parport_ip32_init() too */ module_init(parport_ip32_init); module_exit(parport_ip32_exit); module_param(verbose_probing, bool, S_IRUGO); MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization"); module_param(features, uint, S_IRUGO); MODULE_PARM_DESC(features, "Bit mask of features to enable" ", bit 0: IRQ support" ", bit 1: DMA support" ", bit 2: hardware SPP mode" ", bit 3: hardware EPP mode" ", bit 4: hardware ECP mode"); /*--- Inform (X)Emacs about preferred coding style ---------------------*/ /* * Local Variables: * mode: c * c-file-style: "linux" * indent-tabs-mode: t * tab-width: 8 * fill-column: 78 * ispell-local-dictionary: "american" * End: */
gpl-2.0
i-maravic/MPLS-Linux
lib/decompress_unlzma.c
7737
16161
/* Lzma decompressor for Linux kernel. Shamelessly snarfed *from busybox 1.1.1 * *Linux kernel adaptation *Copyright (C) 2006 Alain < alain@knaff.lu > * *Based on small lzma deflate implementation/Small range coder *implementation for lzma. *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > * *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) *Copyright (C) 1999-2005 Igor Pavlov * *Copyrights of the parts, see headers below. * * *This program is free software; you can redistribute it and/or *modify it under the terms of the GNU Lesser General Public *License as published by the Free Software Foundation; either *version 2.1 of the License, or (at your option) any later version. * *This program is distributed in the hope that it will be useful, *but WITHOUT ANY WARRANTY; without even the implied warranty of *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *Lesser General Public License for more details. * *You should have received a copy of the GNU Lesser General Public *License along with this library; if not, write to the Free Software *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef STATIC #define PREBOOT #else #include <linux/decompress/unlzma.h> #endif /* STATIC */ #include <linux/decompress/mm.h> #define MIN(a, b) (((a) < (b)) ? (a) : (b)) static long long INIT read_int(unsigned char *ptr, int size) { int i; long long ret = 0; for (i = 0; i < size; i++) ret = (ret << 8) | ptr[size-i-1]; return ret; } #define ENDIAN_CONVERT(x) \ x = (typeof(x))read_int((unsigned char *)&x, sizeof(x)) /* Small range coder implementation for lzma. 
 *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
 *
 *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 *Copyright (c) 1999-2005 Igor Pavlov
 */

#include <linux/compiler.h>

/* Size of each input chunk requested from the fill() callback. */
#define LZMA_IOBUF_SIZE	0x10000

/*
 * Range-decoder state.  'fill' refills 'buffer' when 'ptr' reaches
 * 'buffer_end'; 'code', 'range' and 'bound' are the classic LZMA range
 * coder registers; 'error' reports a fatal stream problem to the caller.
 */
struct rc {
	int (*fill)(void*, unsigned int);	/* input refill callback */
	uint8_t *ptr;				/* next input byte to consume */
	uint8_t *buffer;			/* start of input window */
	uint8_t *buffer_end;			/* one past last valid byte */
	int buffer_size;			/* bytes currently in window */
	uint32_t code;				/* current code value */
	uint32_t range;				/* current range width */
	uint32_t bound;				/* threshold cached by rc_is_bit_0_helper() */
	void (*error)(char *);			/* fatal-error callback */
};

#define RC_TOP_BITS 24		/* renormalize when range < 2^24 */
#define RC_MOVE_BITS 5		/* probability adaptation shift */
#define RC_MODEL_TOTAL_BITS 11	/* probabilities are 11-bit fixed point */

/* Default fill() used when no callback is supplied: always reports EOF. */
static int INIT nofill(void *buffer, unsigned int len)
{
	return -1;
}

/* Called twice: once at startup and once in rc_normalize() */
/*
 * Refill the input window through fill().  A non-positive result is
 * reported via rc->error() ("unexpected EOF").
 */
static void INIT rc_read(struct rc *rc)
{
	rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
	if (rc->buffer_size <= 0)
		rc->error("unexpected EOF");
	rc->ptr = rc->buffer;
	rc->buffer_end = rc->buffer + rc->buffer_size;
}

/* Called once */
/*
 * Initialize decoder state over 'buffer'.  When 'fill' is NULL the
 * buffer is treated as the complete input (nofill reports EOF).
 * The first code bytes are loaded later by rc_init_code().
 */
static inline void INIT rc_init(struct rc *rc,
				       int (*fill)(void*, unsigned int),
				       char *buffer, int buffer_size)
{
	if (fill)
		rc->fill = fill;
	else
		rc->fill = nofill;
	rc->buffer = (uint8_t *)buffer;
	rc->buffer_size = buffer_size;
	rc->buffer_end = rc->buffer + rc->buffer_size;
	rc->ptr = rc->buffer;

	/* Canonical LZMA start state: empty code, full range. */
	rc->code = 0;
	rc->range = 0xFFFFFFFF;
}

/* Load the initial 5 code bytes that follow the LZMA header. */
static inline void INIT rc_init_code(struct rc *rc)
{
	int i;

	for (i = 0; i < 5; i++) {
		if (rc->ptr >= rc->buffer_end)
			rc_read(rc);
		rc->code = (rc->code << 8) | *rc->ptr++;
	}
}

/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
/* Shift one more input byte into code, widening the range by 8 bits. */
static void INIT rc_do_normalize(struct rc *rc)
{
	if (rc->ptr >= rc->buffer_end)
		rc_read(rc);
	rc->range <<= 8;
	rc->code = (rc->code << 8) | *rc->ptr++;
}

/* Renormalize only when the range has shrunk below 2^RC_TOP_BITS. */
static inline void INIT rc_normalize(struct rc *rc)
{
	if (rc->range < (1 << RC_TOP_BITS))
		rc_do_normalize(rc);
}

/* Called 9 times */
/* Why rc_is_bit_0_helper exists?
*Because we want to always expose (rc->code < rc->bound) to optimizer */ static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p) { rc_normalize(rc); rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); return rc->bound; } static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p) { uint32_t t = rc_is_bit_0_helper(rc, p); return rc->code < t; } /* Called ~10 times, but very small, thus inlined */ static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p) { rc->range = rc->bound; *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; } static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p) { rc->range -= rc->bound; rc->code -= rc->bound; *p -= *p >> RC_MOVE_BITS; } /* Called 4 times in unlzma loop */ static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol) { if (rc_is_bit_0(rc, p)) { rc_update_bit_0(rc, p); *symbol *= 2; return 0; } else { rc_update_bit_1(rc, p); *symbol = *symbol * 2 + 1; return 1; } } /* Called once */ static inline int INIT rc_direct_bit(struct rc *rc) { rc_normalize(rc); rc->range >>= 1; if (rc->code >= rc->range) { rc->code -= rc->range; return 1; } return 0; } /* Called twice */ static inline void INIT rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol) { int i = num_levels; *symbol = 1; while (i--) rc_get_bit(rc, p + *symbol, symbol); *symbol -= 1 << num_levels; } /* * Small lzma deflate implementation. 
 * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
 *
 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 * Copyright (C) 1999-2005 Igor Pavlov
 */

/*
 * On-disk LZMA stream header: 'pos' packs the lc/lp/pb model parameters
 * as pos = (pb*5 + lp)*9 + lc; sizes are little-endian on disk and
 * byte-swapped by ENDIAN_CONVERT after reading.
 */
struct lzma_header {
	uint8_t pos;		/* packed lc/lp/pb literal-context parameters */
	uint32_t dict_size;	/* dictionary (sliding window) size */
	uint64_t dst_size;	/* uncompressed output size */
} __attribute__ ((packed)) ;


#define LZMA_BASE_SIZE 1846
#define LZMA_LIT_SIZE 768

#define LZMA_NUM_POS_BITS_MAX 4

#define LZMA_LEN_NUM_LOW_BITS 3
#define LZMA_LEN_NUM_MID_BITS 3
#define LZMA_LEN_NUM_HIGH_BITS 8

/*
 * Offsets of the length-coder sub-tables inside one length coder:
 * a choice bit pair selects the low/mid/high bit trees.
 */
#define LZMA_LEN_CHOICE 0
#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
#define LZMA_LEN_MID (LZMA_LEN_LOW \
		      + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
#define LZMA_LEN_HIGH (LZMA_LEN_MID \
		       +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))

#define LZMA_NUM_STATES 12
#define LZMA_NUM_LIT_STATES 7

#define LZMA_START_POS_MODEL_INDEX 4
#define LZMA_END_POS_MODEL_INDEX 14
#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))

#define LZMA_NUM_POS_SLOT_BITS 6
#define LZMA_NUM_LEN_TO_POS_STATES 4

#define LZMA_NUM_ALIGN_BITS 4

#define LZMA_MATCH_MIN_LEN 2

/*
 * Layout of the single flat probability array 'p': each name below is
 * the starting index of one group of adaptive bit probabilities.
 */
#define LZMA_IS_MATCH 0
#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
		       + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_SPEC_POS (LZMA_POS_SLOT \
		       +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
#define LZMA_ALIGN (LZMA_SPEC_POS \
		    + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)

struct writer {
	uint8_t *buffer;		/* output window (dictionary) */
	uint8_t previous_byte;		/* last byte written, literal context */
	size_t buffer_pos;		/* write position inside 'buffer' */
	int bufsize;			/* allocated size when we own 'buffer' */
	size_t global_pos;		/* bytes already flushed out */
	int(*flush)(void*, unsigned int);	/* NULL: single in-memory buffer */
	struct lzma_header *header;
};

/* LZMA match-model state: state machine index plus the 4 recent distances. */
struct cstate {
	int state;
	uint32_t rep0, rep1, rep2, rep3;
};

/* Total number of bytes produced so far (flushed + pending). */
static inline size_t INIT get_pos(struct writer *wr)
{
	return wr->global_pos + wr->buffer_pos;
}

/*
 * Read the byte 'offs' positions back in the output stream.
 * Both branches reduce the offset modulo dict_size; the flush branch
 * relies on unsigned wrap-around of 'pos' for offsets behind the
 * current window start.
 * NOTE(review): assumes offs <= dict_size and offs <= bytes produced —
 * callers are expected to have validated rep0; verify against unlzma().
 */
static inline uint8_t INIT peek_old_byte(struct writer *wr, uint32_t offs)
{
	if (!wr->flush) {
		int32_t pos;
		while (offs > wr->header->dict_size)
			offs -= wr->header->dict_size;
		pos = wr->buffer_pos - offs;
		return wr->buffer[pos];
	} else {
		uint32_t pos = wr->buffer_pos - offs;
		while (pos >= wr->header->dict_size)
			pos += wr->header->dict_size;
		return wr->buffer[pos];
	}
}

/*
 * Append one byte; when flushing is enabled and the window fills up,
 * hand the full dictionary to flush().  Returns 0 on success, -1 when
 * flush() wrote fewer bytes than requested.
 */
static inline int INIT write_byte(struct writer *wr, uint8_t byte)
{
	wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
	if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
		wr->buffer_pos = 0;
		wr->global_pos += wr->header->dict_size;
		if (wr->flush((char *)wr->buffer, wr->header->dict_size)
				!= wr->header->dict_size)
			return -1;
	}
	return 0;
}

/* Copy a single byte from 'offs' back in the stream to the output. */
static inline int INIT copy_byte(struct writer *wr, uint32_t offs)
{
	return write_byte(wr, peek_old_byte(wr, offs));
}

/*
 * Copy a match of 'len' bytes from distance 'rep0'.  Returns 0 when the
 * whole match was copied, nonzero (the remaining length, or -1 on write
 * failure) otherwise.
 */
static inline int INIT copy_bytes(struct writer *wr,
					 uint32_t rep0, int len)
{
	do {
		if (copy_byte(wr, rep0))
			return -1;
		len--;
	} while (len != 0 && wr->buffer_pos < wr->header->dst_size);

	return len;
}

/*
 * Decode a literal byte ("bit 0" branch of the is_match decision).
 * The literal coder is selected by the low bits of the output position
 * (literal_pos_mask) and the high bits of the previous byte (lc); after
 * a match (state >= LZMA_NUM_LIT_STATES) the matched byte steers the
 * bit-tree until the first mismatch.
 */
static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
				     struct cstate *cst, uint16_t *p,
				     int pos_state, uint16_t *prob,
				     int lc, uint32_t literal_pos_mask)
{
	int mi = 1;
	rc_update_bit_0(rc, prob);
	prob = (p + LZMA_LITERAL +
		(LZMA_LIT_SIZE
		 * (((get_pos(wr) & literal_pos_mask) << lc)
		    + (wr->previous_byte >> (8 - lc))))
		);

	if (cst->state >= LZMA_NUM_LIT_STATES) {
		int match_byte = peek_old_byte(wr, cst->rep0);
		do {
			int bit;
			uint16_t *prob_lit;
			match_byte <<= 1;
			bit = match_byte & 0x100;
			prob_lit = prob + 0x100 + bit + mi;
			if (rc_get_bit(rc, prob_lit, &mi)) {
				if (!bit)
					break;
			} else {
				if (bit)
					break;
			}
		} while (mi <
0x100); } while (mi < 0x100) { uint16_t *prob_lit = prob + mi; rc_get_bit(rc, prob_lit, &mi); } if (cst->state < 4) cst->state = 0; else if (cst->state < 10) cst->state -= 3; else cst->state -= 6; return write_byte(wr, mi); } static inline int INIT process_bit1(struct writer *wr, struct rc *rc, struct cstate *cst, uint16_t *p, int pos_state, uint16_t *prob) { int offset; uint16_t *prob_len; int num_bits; int len; rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); cst->rep3 = cst->rep2; cst->rep2 = cst->rep1; cst->rep1 = cst->rep0; cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3; prob = p + LZMA_LEN_CODER; } else { rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP_G0 + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); prob = (p + LZMA_IS_REP_0_LONG + (cst->state << LZMA_NUM_POS_BITS_MAX) + pos_state); if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); cst->state = cst->state < LZMA_NUM_LIT_STATES ? 9 : 11; return copy_byte(wr, cst->rep0); } else { rc_update_bit_1(rc, prob); } } else { uint32_t distance; rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP_G1 + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); distance = cst->rep1; } else { rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP_G2 + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); distance = cst->rep2; } else { rc_update_bit_1(rc, prob); distance = cst->rep3; cst->rep3 = cst->rep2; } cst->rep2 = cst->rep1; } cst->rep1 = cst->rep0; cst->rep0 = distance; } cst->state = cst->state < LZMA_NUM_LIT_STATES ? 
8 : 11; prob = p + LZMA_REP_LEN_CODER; } prob_len = prob + LZMA_LEN_CHOICE; if (rc_is_bit_0(rc, prob_len)) { rc_update_bit_0(rc, prob_len); prob_len = (prob + LZMA_LEN_LOW + (pos_state << LZMA_LEN_NUM_LOW_BITS)); offset = 0; num_bits = LZMA_LEN_NUM_LOW_BITS; } else { rc_update_bit_1(rc, prob_len); prob_len = prob + LZMA_LEN_CHOICE_2; if (rc_is_bit_0(rc, prob_len)) { rc_update_bit_0(rc, prob_len); prob_len = (prob + LZMA_LEN_MID + (pos_state << LZMA_LEN_NUM_MID_BITS)); offset = 1 << LZMA_LEN_NUM_LOW_BITS; num_bits = LZMA_LEN_NUM_MID_BITS; } else { rc_update_bit_1(rc, prob_len); prob_len = prob + LZMA_LEN_HIGH; offset = ((1 << LZMA_LEN_NUM_LOW_BITS) + (1 << LZMA_LEN_NUM_MID_BITS)); num_bits = LZMA_LEN_NUM_HIGH_BITS; } } rc_bit_tree_decode(rc, prob_len, num_bits, &len); len += offset; if (cst->state < 4) { int pos_slot; cst->state += LZMA_NUM_LIT_STATES; prob = p + LZMA_POS_SLOT + ((len < LZMA_NUM_LEN_TO_POS_STATES ? len : LZMA_NUM_LEN_TO_POS_STATES - 1) << LZMA_NUM_POS_SLOT_BITS); rc_bit_tree_decode(rc, prob, LZMA_NUM_POS_SLOT_BITS, &pos_slot); if (pos_slot >= LZMA_START_POS_MODEL_INDEX) { int i, mi; num_bits = (pos_slot >> 1) - 1; cst->rep0 = 2 | (pos_slot & 1); if (pos_slot < LZMA_END_POS_MODEL_INDEX) { cst->rep0 <<= num_bits; prob = p + LZMA_SPEC_POS + cst->rep0 - pos_slot - 1; } else { num_bits -= LZMA_NUM_ALIGN_BITS; while (num_bits--) cst->rep0 = (cst->rep0 << 1) | rc_direct_bit(rc); prob = p + LZMA_ALIGN; cst->rep0 <<= LZMA_NUM_ALIGN_BITS; num_bits = LZMA_NUM_ALIGN_BITS; } i = 1; mi = 1; while (num_bits--) { if (rc_get_bit(rc, prob + mi, &mi)) cst->rep0 |= i; i <<= 1; } } else cst->rep0 = pos_slot; if (++(cst->rep0) == 0) return 0; if (cst->rep0 > wr->header->dict_size || cst->rep0 > get_pos(wr)) return -1; } len += LZMA_MATCH_MIN_LEN; return copy_bytes(wr, cst->rep0, len); } STATIC inline int INIT unlzma(unsigned char *buf, int in_len, int(*fill)(void*, unsigned int), int(*flush)(void*, unsigned int), unsigned char *output, int *posp, void(*error)(char *x) ) 
{ struct lzma_header header; int lc, pb, lp; uint32_t pos_state_mask; uint32_t literal_pos_mask; uint16_t *p; int num_probs; struct rc rc; int i, mi; struct writer wr; struct cstate cst; unsigned char *inbuf; int ret = -1; rc.error = error; if (buf) inbuf = buf; else inbuf = malloc(LZMA_IOBUF_SIZE); if (!inbuf) { error("Could not allocate input buffer"); goto exit_0; } cst.state = 0; cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1; wr.header = &header; wr.flush = flush; wr.global_pos = 0; wr.previous_byte = 0; wr.buffer_pos = 0; rc_init(&rc, fill, inbuf, in_len); for (i = 0; i < sizeof(header); i++) { if (rc.ptr >= rc.buffer_end) rc_read(&rc); ((unsigned char *)&header)[i] = *rc.ptr++; } if (header.pos >= (9 * 5 * 5)) { error("bad header"); goto exit_1; } mi = 0; lc = header.pos; while (lc >= 9) { mi++; lc -= 9; } pb = 0; lp = mi; while (lp >= 5) { pb++; lp -= 5; } pos_state_mask = (1 << pb) - 1; literal_pos_mask = (1 << lp) - 1; ENDIAN_CONVERT(header.dict_size); ENDIAN_CONVERT(header.dst_size); if (header.dict_size == 0) header.dict_size = 1; if (output) wr.buffer = output; else { wr.bufsize = MIN(header.dst_size, header.dict_size); wr.buffer = large_malloc(wr.bufsize); } if (wr.buffer == NULL) goto exit_1; num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); p = (uint16_t *) large_malloc(num_probs * sizeof(*p)); if (p == 0) goto exit_2; num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); for (i = 0; i < num_probs; i++) p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; rc_init_code(&rc); while (get_pos(&wr) < header.dst_size) { int pos_state = get_pos(&wr) & pos_state_mask; uint16_t *prob = p + LZMA_IS_MATCH + (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state; if (rc_is_bit_0(&rc, prob)) { if (process_bit0(&wr, &rc, &cst, p, pos_state, prob, lc, literal_pos_mask)) { error("LZMA data is corrupt"); goto exit_3; } } else { if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) { error("LZMA data is corrupt"); goto exit_3; } if (cst.rep0 == 0) break; } if (rc.buffer_size 
<= 0) goto exit_3; } if (posp) *posp = rc.ptr-rc.buffer; if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos) ret = 0; exit_3: large_free(p); exit_2: if (!output) large_free(wr.buffer); exit_1: if (!buf) free(inbuf); exit_0: return ret; } #ifdef PREBOOT STATIC int INIT decompress(unsigned char *buf, int in_len, int(*fill)(void*, unsigned int), int(*flush)(void*, unsigned int), unsigned char *output, int *posp, void(*error)(char *x) ) { return unlzma(buf, in_len - 4, fill, flush, output, posp, error); } #endif
gpl-2.0
AAN3AC/android_kernel_google_msm
drivers/input/joystick/a3d.c
7993
11356
/* * Copyright (c) 1998-2001 Vojtech Pavlik */ /* * FP-Gaming Assassin 3D joystick driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/gameport.h> #include <linux/input.h> #include <linux/jiffies.h> #define DRIVER_DESC "FP-Gaming Assassin 3D joystick driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define A3D_MAX_START 600 /* 600 us */ #define A3D_MAX_STROBE 80 /* 80 us */ #define A3D_MAX_LENGTH 40 /* 40*3 bits */ #define A3D_MODE_A3D 1 /* Assassin 3D */ #define A3D_MODE_PAN 2 /* Panther */ #define A3D_MODE_OEM 3 /* Panther OEM version */ #define A3D_MODE_PXL 4 /* Panther XL */ static char *a3d_names[] = { NULL, "FP-Gaming Assassin 3D", "MadCatz Panther", "OEM Panther", "MadCatz Panther XL", "MadCatz Panther XL w/ rudder" }; struct a3d { struct gameport *gameport; struct gameport *adc; struct input_dev *dev; int axes[4]; int buttons; int mode; int length; int reads; int bads; char phys[32]; }; /* * a3d_read_packet() reads an 
Assassin 3D packet. */

/*
 * Bit-bangs a packet from the gameport with interrupts disabled.
 * Each falling edge of status bit 4 clocks in one 3-bit triplet taken
 * from status bits 5-7.  't' is the start timeout and 's' the
 * per-triplet strobe timeout, both in gameport time units.
 * Returns the number of triplets actually read (<= 'length').
 */
static int a3d_read_packet(struct gameport *gameport, int length, char *data)
{
	unsigned long flags;
	unsigned char u, v;
	unsigned int t, s;
	int i;

	i = 0;
	t = gameport_time(gameport, A3D_MAX_START);
	s = gameport_time(gameport, A3D_MAX_STROBE);

	local_irq_save(flags);
	gameport_trigger(gameport);
	v = gameport_read(gameport);

	while (t > 0 && i < length) {
		t--;
		u = v;
		v = gameport_read(gameport);
		/* Falling edge on the clock line (bit 4): latch a triplet. */
		if (~v & u & 0x10) {
			data[i++] = v >> 5;
			t = s;	/* restart the strobe timeout */
		}
	}

	local_irq_restore(flags);

	return i;
}

/*
 * a3d_csum() computes checksum of triplet packet
 */

/*
 * The last two triplets carry a 6-bit sum of all preceding triplets.
 * Returns 0 when the checksum matches, nonzero on a corrupt packet.
 */
static int a3d_csum(char *data, int count)
{
	int i, csum = 0;

	for (i = 0; i < count - 2; i++)
		csum += data[i];
	return (csum & 0x3f) != ((data[count - 2] << 3) | data[count - 1]);
}

/*
 * Decode a validated packet into input events and cached ADC values,
 * according to the device mode detected at connect time.  Triplets are
 * reassembled into bytes as (t0 << 6) | (t1 << 3) | t2.
 */
static void a3d_read(struct a3d *a3d, unsigned char *data)
{
	struct input_dev *dev = a3d->dev;

	switch (a3d->mode) {

		case A3D_MODE_A3D:
		case A3D_MODE_OEM:
		case A3D_MODE_PAN:

			/* 9-bit signed relative mouse movement. */
			input_report_rel(dev, REL_X, ((data[5] << 6) | (data[6] << 3) | data[ 7]) - ((data[5] & 4) << 7));
			input_report_rel(dev, REL_Y, ((data[8] << 6) | (data[9] << 3) | data[10]) - ((data[8] & 4) << 7));

			input_report_key(dev, BTN_RIGHT,  data[2] & 1);
			input_report_key(dev, BTN_LEFT,   data[3] & 2);
			input_report_key(dev, BTN_MIDDLE, data[3] & 4);

			input_sync(dev);

			/* Cache stick axes/buttons for the cooked ADC port. */
			a3d->axes[0] = ((signed char)((data[11] << 6) | (data[12] << 3) | (data[13]))) + 128;
			a3d->axes[1] = ((signed char)((data[14] << 6) | (data[15] << 3) | (data[16]))) + 128;
			a3d->axes[2] = ((signed char)((data[17] << 6) | (data[18] << 3) | (data[19]))) + 128;
			a3d->axes[3] = ((signed char)((data[20] << 6) | (data[21] << 3) | (data[22]))) + 128;

			a3d->buttons = ((data[3] << 3) | data[4]) & 0xf;

			break;

		case A3D_MODE_PXL:

			input_report_rel(dev, REL_X, ((data[ 9] << 6) | (data[10] << 3) | data[11]) - ((data[ 9] & 4) << 7));
			input_report_rel(dev, REL_Y, ((data[12] << 6) | (data[13] << 3) | data[14]) - ((data[12] & 4) << 7));

			input_report_key(dev, BTN_RIGHT,  data[2] & 1);
			input_report_key(dev, BTN_LEFT,   data[3] & 2);
input_report_key(dev, BTN_MIDDLE, data[3] & 4); input_report_key(dev, BTN_SIDE, data[7] & 2); input_report_key(dev, BTN_EXTRA, data[7] & 4); input_report_abs(dev, ABS_X, ((signed char)((data[15] << 6) | (data[16] << 3) | (data[17]))) + 128); input_report_abs(dev, ABS_Y, ((signed char)((data[18] << 6) | (data[19] << 3) | (data[20]))) + 128); input_report_abs(dev, ABS_RUDDER, ((signed char)((data[21] << 6) | (data[22] << 3) | (data[23]))) + 128); input_report_abs(dev, ABS_THROTTLE, ((signed char)((data[24] << 6) | (data[25] << 3) | (data[26]))) + 128); input_report_abs(dev, ABS_HAT0X, ( data[5] & 1) - ((data[5] >> 2) & 1)); input_report_abs(dev, ABS_HAT0Y, ((data[5] >> 1) & 1) - ((data[6] >> 2) & 1)); input_report_abs(dev, ABS_HAT1X, ((data[4] >> 1) & 1) - ( data[3] & 1)); input_report_abs(dev, ABS_HAT1Y, ((data[4] >> 2) & 1) - ( data[4] & 1)); input_report_key(dev, BTN_TRIGGER, data[8] & 1); input_report_key(dev, BTN_THUMB, data[8] & 2); input_report_key(dev, BTN_TOP, data[8] & 4); input_report_key(dev, BTN_PINKIE, data[7] & 1); input_sync(dev); break; } } /* * a3d_poll() reads and analyzes A3D joystick data. */ static void a3d_poll(struct gameport *gameport) { struct a3d *a3d = gameport_get_drvdata(gameport); unsigned char data[A3D_MAX_LENGTH]; a3d->reads++; if (a3d_read_packet(a3d->gameport, a3d->length, data) != a3d->length || data[0] != a3d->mode || a3d_csum(data, a3d->length)) a3d->bads++; else a3d_read(a3d, data); } /* * a3d_adc_cooked_read() copies the acis and button data to the * callers arrays. It could do the read itself, but the caller could * call this more than 50 times a second, which would use too much CPU. */ static int a3d_adc_cooked_read(struct gameport *gameport, int *axes, int *buttons) { struct a3d *a3d = gameport->port_data; int i; for (i = 0; i < 4; i++) axes[i] = (a3d->axes[i] < 254) ? a3d->axes[i] : -1; *buttons = a3d->buttons; return 0; } /* * a3d_adc_open() is the gameport open routine. It refuses to serve * any but cooked data. 
 */

/*
 * Open routine of the child ADC gameport; only cooked (pre-decoded)
 * mode is served, raw access is refused with -1.
 */
static int a3d_adc_open(struct gameport *gameport, int mode)
{
	struct a3d *a3d = gameport->port_data;

	if (mode != GAMEPORT_MODE_COOKED)
		return -1;

	gameport_start_polling(a3d->gameport);
	return 0;
}

/*
 * a3d_adc_close() is the close routine of the child ADC gameport
 * (stops polling the parent port).
 */

static void a3d_adc_close(struct gameport *gameport)
{
	struct a3d *a3d = gameport->port_data;

	gameport_stop_polling(a3d->gameport);
}

/*
 * a3d_open() is a callback from the input open routine.
 */

static int a3d_open(struct input_dev *dev)
{
	struct a3d *a3d = input_get_drvdata(dev);

	gameport_start_polling(a3d->gameport);
	return 0;
}

/*
 * a3d_close() is a callback from the input close routine.
 */

static void a3d_close(struct input_dev *dev)
{
	struct a3d *a3d = input_get_drvdata(dev);

	gameport_stop_polling(a3d->gameport);
}

/*
 * a3d_connect() probes for A3D joysticks.
 */

static int a3d_connect(struct gameport *gameport, struct gameport_driver *drv)
{
	struct a3d *a3d;
	struct input_dev *input_dev;
	struct gameport *adc;
	unsigned char data[A3D_MAX_LENGTH];
	int i;
	int err;

	a3d = kzalloc(sizeof(struct a3d), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!a3d || !input_dev) {
		err = -ENOMEM;
		goto fail1;
	}

	a3d->dev = input_dev;
	a3d->gameport = gameport;

	gameport_set_drvdata(gameport, a3d);

	err = gameport_open(gameport, drv, GAMEPORT_MODE_RAW);
	if (err)
		goto fail1;

	/* Read one full packet; an empty or corrupt packet means no A3D. */
	i = a3d_read_packet(gameport, A3D_MAX_LENGTH, data);
	if (!i || a3d_csum(data, i)) {
		err = -ENODEV;
		goto fail2;
	}

	/* First triplet identifies the device; valid modes are 1..5. */
	a3d->mode = data[0];

	if (!a3d->mode || a3d->mode > 5) {
		printk(KERN_WARNING "a3d.c: Unknown A3D device detected "
			"(%s, id=%d), contact <vojtech@ucw.cz>\n", gameport->phys, a3d->mode);
		err = -ENODEV;
		goto fail2;
	}

	gameport_set_poll_handler(gameport, a3d_poll);
	gameport_set_poll_interval(gameport, 20);

	snprintf(a3d->phys, sizeof(a3d->phys), "%s/input0", gameport->phys);

	input_dev->name = a3d_names[a3d->mode];
	input_dev->phys = a3d->phys;
	input_dev->id.bustype = BUS_GAMEPORT;
	input_dev->id.vendor = GAMEPORT_ID_VENDOR_MADCATZ;
input_dev->id.product = a3d->mode; input_dev->id.version = 0x0100; input_dev->dev.parent = &gameport->dev; input_dev->open = a3d_open; input_dev->close = a3d_close; input_set_drvdata(input_dev, a3d); if (a3d->mode == A3D_MODE_PXL) { int axes[] = { ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER }; a3d->length = 33; input_dev->evbit[0] |= BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); input_dev->relbit[0] |= BIT_MASK(REL_X) | BIT_MASK(REL_Y); input_dev->absbit[0] |= BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) | BIT_MASK(ABS_THROTTLE) | BIT_MASK(ABS_RUDDER) | BIT_MASK(ABS_HAT0X) | BIT_MASK(ABS_HAT0Y) | BIT_MASK(ABS_HAT1X) | BIT_MASK(ABS_HAT1Y); input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA); input_dev->keybit[BIT_WORD(BTN_JOYSTICK)] |= BIT_MASK(BTN_TRIGGER) | BIT_MASK(BTN_THUMB) | BIT_MASK(BTN_TOP) | BIT_MASK(BTN_PINKIE); a3d_read(a3d, data); for (i = 0; i < 4; i++) { if (i < 2) input_set_abs_params(input_dev, axes[i], 48, input_abs_get_val(input_dev, axes[i]) * 2 - 48, 0, 8); else input_set_abs_params(input_dev, axes[i], 2, 253, 0, 0); input_set_abs_params(input_dev, ABS_HAT0X + i, -1, 1, 0, 0); } } else { a3d->length = 29; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); input_dev->relbit[0] |= BIT_MASK(REL_X) | BIT_MASK(REL_Y); input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE); a3d_read(a3d, data); if (!(a3d->adc = adc = gameport_allocate_port())) printk(KERN_ERR "a3d: Not enough memory for ADC port\n"); else { adc->port_data = a3d; adc->open = a3d_adc_open; adc->close = a3d_adc_close; adc->cooked_read = a3d_adc_cooked_read; adc->fuzz = 1; gameport_set_name(adc, a3d_names[a3d->mode]); gameport_set_phys(adc, "%s/gameport0", gameport->phys); adc->dev.parent = &gameport->dev; gameport_register_port(adc); } } err = input_register_device(a3d->dev); if (err) goto fail3; return 0; fail3: if (a3d->adc) 
gameport_unregister_port(a3d->adc); fail2: gameport_close(gameport); fail1: gameport_set_drvdata(gameport, NULL); input_free_device(input_dev); kfree(a3d); return err; } static void a3d_disconnect(struct gameport *gameport) { struct a3d *a3d = gameport_get_drvdata(gameport); input_unregister_device(a3d->dev); if (a3d->adc) gameport_unregister_port(a3d->adc); gameport_close(gameport); gameport_set_drvdata(gameport, NULL); kfree(a3d); } static struct gameport_driver a3d_drv = { .driver = { .name = "adc", .owner = THIS_MODULE, }, .description = DRIVER_DESC, .connect = a3d_connect, .disconnect = a3d_disconnect, }; static int __init a3d_init(void) { return gameport_register_driver(&a3d_drv); } static void __exit a3d_exit(void) { gameport_unregister_driver(&a3d_drv); } module_init(a3d_init); module_exit(a3d_exit);
gpl-2.0
roguesyko/the-reaper
drivers/net/cxgb4/t4_hw.c
7993
89030
/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/init.h> #include <linux/delay.h> #include "cxgb4.h" #include "t4_regs.h" #include "t4fw_api.h" /** * t4_wait_op_done_val - wait until an operation is completed * @adapter: the adapter performing the operation * @reg: the register to check for completion * @mask: a single-bit field within @reg that indicates completion * @polarity: the value of the field when the operation is completed * @attempts: number of check iterations * @delay: delay in usecs between iterations * @valp: where to store the value of the register at completion time * * Wait until an operation is completed by checking a bit in a register * up to @attempts times. If @valp is not NULL the value of the register * at the time it indicated completion is stored there. Returns 0 if the * operation completes and -EAGAIN otherwise. */ static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, int polarity, int attempts, int delay, u32 *valp) { while (1) { u32 val = t4_read_reg(adapter, reg); if (!!(val & mask) == polarity) { if (valp) *valp = val; return 0; } if (--attempts == 0) return -EAGAIN; if (delay) udelay(delay); } } static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, int polarity, int attempts, int delay) { return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, delay, NULL); } /** * t4_set_reg_field - set a register field to a value * @adapter: the adapter to program * @addr: the register address * @mask: specifies the portion of the register to modify * @val: the new value for the register field * * Sets a register field specified by the supplied mask to the * given value. 
*/ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, u32 val) { u32 v = t4_read_reg(adapter, addr) & ~mask; t4_write_reg(adapter, addr, v | val); (void) t4_read_reg(adapter, addr); /* flush */ } /** * t4_read_indirect - read indirectly addressed registers * @adap: the adapter * @addr_reg: register holding the indirect address * @data_reg: register holding the value of the indirect register * @vals: where the read register values are stored * @nregs: how many indirect registers to read * @start_idx: index of first indirect register to read * * Reads registers that are accessed indirectly through an address/data * register pair. */ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx) { while (nregs--) { t4_write_reg(adap, addr_reg, start_idx); *vals++ = t4_read_reg(adap, data_reg); start_idx++; } } /* * Get the reply to a mailbox command and store it in @rpl in big-endian order. */ static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, u32 mbox_addr) { for ( ; nflit; nflit--, mbox_addr += 8) *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); } /* * Handle a FW assertion reported in a mailbox. 
*/ static void fw_asrt(struct adapter *adap, u32 mbox_addr) { struct fw_debug_cmd asrt; get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); dev_alert(adap->pdev_dev, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); } static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) { dev_err(adap->pdev_dev, "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, (unsigned long long)t4_read_reg64(adap, data_reg), (unsigned long long)t4_read_reg64(adap, data_reg + 8), (unsigned long long)t4_read_reg64(adap, data_reg + 16), (unsigned long long)t4_read_reg64(adap, data_reg + 24), (unsigned long long)t4_read_reg64(adap, data_reg + 32), (unsigned long long)t4_read_reg64(adap, data_reg + 40), (unsigned long long)t4_read_reg64(adap, data_reg + 48), (unsigned long long)t4_read_reg64(adap, data_reg + 56)); } /** * t4_wr_mbox_meat - send a command to FW through the given mailbox * @adap: the adapter * @mbox: index of the mailbox to use * @cmd: the command to write * @size: command length in bytes * @rpl: where to optionally store the reply * @sleep_ok: if true we may sleep while awaiting command completion * * Sends the given command to FW through the selected mailbox and waits * for the FW to execute the command. If @rpl is not %NULL it is used to * store the FW's reply to the command. The command and its optional * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms * to respond. @sleep_ok determines whether we may sleep while awaiting * the response. If sleeping is allowed we use progressive backoff * otherwise we spin. * * The return value is 0 on success or a negative errno on failure. A * failure can happen either because we are not able to execute the * command or FW executes it but signals an error. In the latter case * the return value is the error code indicated by FW (negated). 
*/ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, void *rpl, bool sleep_ok) { static const int delay[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 }; u32 v; u64 res; int i, ms, delay_idx; const __be64 *p = cmd; u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); if ((size & 15) || size > MBOX_LEN) return -EINVAL; /* * If the device is off-line, as in EEH, commands will time out. * Fail them early so we don't waste time waiting. */ if (adap->pdev->error_state != pci_channel_io_normal) return -EIO; v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); if (v != MBOX_OWNER_DRV) return v ? -EBUSY : -ETIMEDOUT; for (i = 0; i < size; i += 8) t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); t4_read_reg(adap, ctl_reg); /* flush write */ delay_idx = 0; ms = delay[0]; for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { if (sleep_ok) { ms = delay[delay_idx]; /* last element may repeat */ if (delay_idx < ARRAY_SIZE(delay) - 1) delay_idx++; msleep(ms); } else mdelay(ms); v = t4_read_reg(adap, ctl_reg); if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { if (!(v & MBMSGVALID)) { t4_write_reg(adap, ctl_reg, 0); continue; } res = t4_read_reg64(adap, data_reg); if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { fw_asrt(adap, data_reg); res = FW_CMD_RETVAL(EIO); } else if (rpl) get_mbox_rpl(adap, rpl, size / 8, data_reg); if (FW_CMD_RETVAL_GET((int)res)) dump_mbox(adap, mbox, data_reg); t4_write_reg(adap, ctl_reg, 0); return -FW_CMD_RETVAL_GET((int)res); } } dump_mbox(adap, mbox, data_reg); dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); return -ETIMEDOUT; } /** * t4_mc_read - read from MC through backdoor accesses * @adap: the adapter * @addr: address of first byte requested * @data: 64 bytes of data containing the requested address 
* @ecc: where to store the corresponding 64-bit ECC word * * Read 64 bytes of data from MC starting at a 64-byte-aligned address * that covers the requested address @addr. If @parity is not %NULL it * is assigned the 64-bit ECC word for the read data. */ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) { int i; if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) return -EBUSY; t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); t4_write_reg(adap, MC_BIST_CMD_LEN, 64); t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1)); i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); if (i) return i; #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) for (i = 15; i >= 0; i--) *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); if (ecc) *ecc = t4_read_reg64(adap, MC_DATA(16)); #undef MC_DATA return 0; } /** * t4_edc_read - read from EDC through backdoor accesses * @adap: the adapter * @idx: which EDC to access * @addr: address of first byte requested * @data: 64 bytes of data containing the requested address * @ecc: where to store the corresponding 64-bit ECC word * * Read 64 bytes of data from EDC starting at a 64-byte-aligned address * that covers the requested address @addr. If @parity is not %NULL it * is assigned the 64-bit ECC word for the read data. 
*/ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) { int i; idx *= EDC_STRIDE; if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) return -EBUSY; t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); t4_write_reg(adap, EDC_BIST_CMD + idx, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); if (i) return i; #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) for (i = 15; i >= 0; i--) *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); if (ecc) *ecc = t4_read_reg64(adap, EDC_DATA(16)); #undef EDC_DATA return 0; } #define EEPROM_STAT_ADDR 0x7bfc #define VPD_BASE 0 #define VPD_LEN 512 /** * t4_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter * @enable: whether to enable or disable write protection * * Enables or disables write protection on the serial EEPROM. */ int t4_seeprom_wp(struct adapter *adapter, bool enable) { unsigned int v = enable ? 0xc : 0; int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); return ret < 0 ? ret : 0; } /** * get_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read * @p: where to store the parameters * * Reads card parameters stored in VPD EEPROM. 
*/ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { int i, ret; int ec, sn; u8 vpd[VPD_LEN], csum; unsigned int vpdr_len, kw_offset, id_len; ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd); if (ret < 0) return ret; if (vpd[0] != PCI_VPD_LRDT_ID_STRING) { dev_err(adapter->pdev_dev, "missing VPD ID string\n"); return -EINVAL; } id_len = pci_vpd_lrdt_size(vpd); if (id_len > ID_LEN) id_len = ID_LEN; i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) { dev_err(adapter->pdev_dev, "missing VPD-R section\n"); return -EINVAL; } vpdr_len = pci_vpd_lrdt_size(&vpd[i]); kw_offset = i + PCI_VPD_LRDT_TAG_SIZE; if (vpdr_len + kw_offset > VPD_LEN) { dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); return -EINVAL; } #define FIND_VPD_KW(var, name) do { \ var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \ if (var < 0) { \ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ return -EINVAL; \ } \ var += PCI_VPD_INFO_FLD_HDR_SIZE; \ } while (0) FIND_VPD_KW(i, "RV"); for (csum = 0; i >= 0; i--) csum += vpd[i]; if (csum) { dev_err(adapter->pdev_dev, "corrupted VPD EEPROM, actual csum %u\n", csum); return -EINVAL; } FIND_VPD_KW(ec, "EC"); FIND_VPD_KW(sn, "SN"); #undef FIND_VPD_KW memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); strim(p->id); memcpy(p->ec, vpd + ec, EC_LEN); strim(p->ec); i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); strim(p->sn); return 0; } /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ /* flash command opcodes */ SF_PROG_PAGE = 2, /* program page */ SF_WR_DISABLE = 4, /* disable writes */ SF_RD_STATUS = 5, /* read status register */ SF_WR_ENABLE = 6, /* enable writes */ SF_RD_DATA_FAST = 0xb, /* read flash */ SF_RD_ID = 0x9f, /* read ID */ SF_ERASE_SECTOR = 0xd8, /* erase sector */ FW_MAX_SIZE = 512 * 1024, }; /** * sf1_read - read data from the 
serial flash * @adapter: the adapter * @byte_cnt: number of bytes to read * @cont: whether another operation will be chained * @lock: whether to lock SF for PL access only * @valp: where to store the read data * * Reads up to 4 bytes of data from the serial flash. The location of * the read needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. */ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock, u32 *valp) { int ret; if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t4_read_reg(adapter, SF_OP) & BUSY) return -EBUSY; cont = cont ? SF_CONT : 0; lock = lock ? SF_LOCK : 0; t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); if (!ret) *valp = t4_read_reg(adapter, SF_DATA); return ret; } /** * sf1_write - write data to the serial flash * @adapter: the adapter * @byte_cnt: number of bytes to write * @cont: whether another operation will be chained * @lock: whether to lock SF for PL access only * @val: value to write * * Writes up to 4 bytes of data to the serial flash. The location of * the write needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. */ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock, u32 val) { if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t4_read_reg(adapter, SF_OP) & BUSY) return -EBUSY; cont = cont ? SF_CONT : 0; lock = lock ? SF_LOCK : 0; t4_write_reg(adapter, SF_DATA, val); t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1) | OP_WR); return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); } /** * flash_wait_op - wait for a flash operation to complete * @adapter: the adapter * @attempts: max number of polls of the status register * @delay: delay between polls in ms * * Wait for a flash operation to complete by polling the status register. 
*/ static int flash_wait_op(struct adapter *adapter, int attempts, int delay) { int ret; u32 status; while (1) { if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) return ret; if (!(status & 1)) return 0; if (--attempts == 0) return -EAGAIN; if (delay) msleep(delay); } } /** * t4_read_flash - read words from serial flash * @adapter: the adapter * @addr: the start address for the read * @nwords: how many 32-bit words to read * @data: where to store the read data * @byte_oriented: whether to store data as bytes or as words * * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's * natural endianess. */ static int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) { int ret; if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) return -EINVAL; addr = swab32(addr) | SF_RD_DATA_FAST; if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) return ret; for ( ; nwords; nwords--, data++) { ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); if (nwords == 1) t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ if (ret) return ret; if (byte_oriented) *data = htonl(*data); } return 0; } /** * t4_write_flash - write up to a page of data to the serial flash * @adapter: the adapter * @addr: the start address to write * @n: length of data to write in bytes * @data: the data to write * * Writes up to a page of data (256 bytes) to the serial flash starting * at the given address. All the data must be written to the same page. 
*/ static int t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n, const u8 *data) { int ret; u32 buf[64]; unsigned int i, c, left, val, offset = addr & 0xff; if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) return -EINVAL; val = swab32(addr) | SF_PROG_PAGE; if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) goto unlock; for (left = n; left; left -= c) { c = min(left, 4U); for (val = 0, i = 0; i < c; ++i) val = (val << 8) + *data++; ret = sf1_write(adapter, c, c != left, 1, val); if (ret) goto unlock; } ret = flash_wait_op(adapter, 8, 1); if (ret) goto unlock; t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ /* Read the page to verify the write succeeded */ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); if (ret) return ret; if (memcmp(data - n, (u8 *)buf + offset, n)) { dev_err(adapter->pdev_dev, "failed to correctly write the flash page at %#x\n", addr); return -EIO; } return 0; unlock: t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ return ret; } /** * get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. */ static int get_fw_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); } /** * get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version * * Reads the TP microcode version from flash. */ static int get_tp_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } /** * t4_check_fw_version - check if the FW is compatible with this driver * @adapter: the adapter * * Checks if an adapter's FW is compatible with the driver. 
Returns 0 * if there's exact match, a negative error if the version could not be * read or there's a major version mismatch, and a positive value if the * expected major version is found but there's a minor version mismatch. */ int t4_check_fw_version(struct adapter *adapter) { u32 api_vers[2]; int ret, major, minor, micro; ret = get_fw_version(adapter, &adapter->params.fw_vers); if (!ret) ret = get_tp_version(adapter, &adapter->params.tp_vers); if (!ret) ret = t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, intfver_nic), 2, api_vers, 1); if (ret) return ret; major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); memcpy(adapter->params.api_vers, api_vers, sizeof(adapter->params.api_vers)); if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ dev_err(adapter->pdev_dev, "card FW has major version %u, driver wants %u\n", major, FW_VERSION_MAJOR); return -EINVAL; } if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) return 0; /* perfect match */ /* Minor/micro version mismatch. Report it but often it's OK. */ return 1; } /** * t4_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter * @start: the first sector to erase * @end: the last sector to erase * * Erases the sectors in the given inclusive range. 
*/ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) { int ret = 0; while (start <= end) { if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, SF_ERASE_SECTOR | (start << 8))) != 0 || (ret = flash_wait_op(adapter, 14, 500)) != 0) { dev_err(adapter->pdev_dev, "erase of flash sector %d failed, error %d\n", start, ret); break; } start++; } t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ return ret; } /** * t4_load_fw - download firmware * @adap: the adapter * @fw_data: the firmware image to write * @size: image size * * Write the supplied firmware image to the card's serial flash. */ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) { u32 csum; int ret, addr; unsigned int i; u8 first_page[SF_PAGE_SIZE]; const u32 *p = (const u32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; unsigned int fw_img_start = adap->params.sf_fw_start; unsigned int fw_start_sec = fw_img_start / sf_sec_size; if (!size) { dev_err(adap->pdev_dev, "FW image has no data\n"); return -EINVAL; } if (size & 511) { dev_err(adap->pdev_dev, "FW image size not multiple of 512 bytes\n"); return -EINVAL; } if (ntohs(hdr->len512) * 512 != size) { dev_err(adap->pdev_dev, "FW image size differs from size in FW header\n"); return -EINVAL; } if (size > FW_MAX_SIZE) { dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", FW_MAX_SIZE); return -EFBIG; } for (csum = 0, i = 0; i < size / sizeof(csum); i++) csum += ntohl(p[i]); if (csum != 0xffffffff) { dev_err(adap->pdev_dev, "corrupted firmware image, checksum %#x\n", csum); return -EINVAL; } i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); if (ret) goto out; /* * We write the correct version at the end so the driver can see a bad * version if the FW write fails. 
Start by writing a copy of the * first page with a bad version. */ memcpy(first_page, fw_data, SF_PAGE_SIZE); ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); if (ret) goto out; addr = fw_img_start; for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; fw_data += SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); if (ret) goto out; } ret = t4_write_flash(adap, fw_img_start + offsetof(struct fw_hdr, fw_ver), sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); out: if (ret) dev_err(adap->pdev_dev, "firmware download failed, error %d\n", ret); return ret; } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) /** * t4_link_start - apply link configuration to MAC/PHY * @phy: the PHY to setup * @mac: the MAC to setup * @lc: the requested link configuration * * Set up a port's MAC and PHY according to a desired link configuration. * - If the PHY can auto-negotiate first decide what to advertise, then * enable/disable auto-negotiation as desired, and reset. * - If the PHY does not auto-negotiate just reset it. * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. 
*/ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); lc->link_ok = 0; if (lc->requested_fc & PAUSE_RX) fc |= FW_PORT_CAP_FC_RX; if (lc->requested_fc & PAUSE_TX) fc |= FW_PORT_CAP_FC_TX; memset(&c, 0, sizeof(c)); c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); if (!(lc->supported & FW_PORT_CAP_ANEG)) { c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); } else if (lc->autoneg == AUTONEG_DISABLE) { c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); } else c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_restart_aneg - restart autonegotiation * @adap: the adapter * @mbox: mbox to use for the FW command * @port: the port id * * Restarts autonegotiation for the selected port. 
*/ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) { struct fw_port_cmd c; memset(&c, 0, sizeof(c)); c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } struct intr_info { unsigned int mask; /* bits to check in interrupt status */ const char *msg; /* message to print or NULL */ short stat_idx; /* stat counter to increment or -1 */ unsigned short fatal; /* whether the condition reported is fatal */ }; /** * t4_handle_intr_status - table driven interrupt handler * @adapter: the adapter that generated the interrupt * @reg: the interrupt status register to process * @acts: table of interrupt actions * * A table driven interrupt handler that applies a set of masks to an * interrupt status word and performs the corresponding actions if the * interrupts described by the mask have occurred. The actions include * optionally emitting a warning or alert message. The table is terminated * by an entry specifying mask 0. Returns the number of fatal interrupt * conditions. */ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, const struct intr_info *acts) { int fatal = 0; unsigned int mask = 0; unsigned int status = t4_read_reg(adapter, reg); for ( ; acts->mask; ++acts) { if (!(status & acts->mask)) continue; if (acts->fatal) { fatal++; dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, status & acts->mask); } else if (acts->msg && printk_ratelimit()) dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, status & acts->mask); mask |= acts->mask; } status &= mask; if (status) /* clear processed interrupts */ t4_write_reg(adapter, reg, status); return fatal; } /* * Interrupt handler for the PCIE module. 
*/ static void pcie_intr_handler(struct adapter *adapter) { static const struct intr_info sysbus_intr_info[] = { { RNPP, "RXNP array parity error", -1, 1 }, { RPCP, "RXPC array parity error", -1, 1 }, { RCIP, "RXCIF array parity error", -1, 1 }, { RCCP, "Rx completions control array parity error", -1, 1 }, { RFTP, "RXFT array parity error", -1, 1 }, { 0 } }; static const struct intr_info pcie_port_intr_info[] = { { TPCP, "TXPC array parity error", -1, 1 }, { TNPP, "TXNP array parity error", -1, 1 }, { TFTP, "TXFT array parity error", -1, 1 }, { TCAP, "TXCA array parity error", -1, 1 }, { TCIP, "TXCIF array parity error", -1, 1 }, { RCAP, "RXCA array parity error", -1, 1 }, { OTDD, "outbound request TLP discarded", -1, 1 }, { RDPE, "Rx data parity error", -1, 1 }, { TDUE, "Tx uncorrectable data error", -1, 1 }, { 0 } }; static const struct intr_info pcie_intr_info[] = { { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, { MSIDATAPERR, "MSI data parity error", -1, 1 }, { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, { CFGSNPPERR, "PCI config 
snoop FIFO parity error", -1, 1 }, { FIDPERR, "PCI FID parity error", -1, 1 }, { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, { MATAGPERR, "PCI MA tag parity error", -1, 1 }, { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, { RXWRPERR, "PCI Rx write parity error", -1, 1 }, { RPLPERR, "PCI replay buffer parity error", -1, 1 }, { PCIESINT, "PCI core secondary fault", -1, 1 }, { PCIEPINT, "PCI core primary fault", -1, 1 }, { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, { 0 } }; int fat; fat = t4_handle_intr_status(adapter, PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, sysbus_intr_info) + t4_handle_intr_status(adapter, PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); if (fat) t4_fatal_err(adapter); } /* * TP interrupt handler. */ static void tp_intr_handler(struct adapter *adapter) { static const struct intr_info tp_intr_info[] = { { 0x3fffffff, "TP parity error", -1, 1 }, { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) t4_fatal_err(adapter); } /* * SGE interrupt handler. 
*/ static void sge_intr_handler(struct adapter *adapter) { u64 v; static const struct intr_info sge_intr_info[] = { { ERR_CPL_EXCEED_IQE_SIZE, "SGE received CPL exceeding IQE size", -1, 1 }, { ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large", -1, 0 }, { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, "SGE IQID > 1023 received CPL for FL", -1, 0 }, { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 0 }, { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 0 }, { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 0 }, { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 0 }, { ERR_ING_CTXT_PRIO, "SGE too many priority ingress contexts", -1, 0 }, { ERR_EGR_CTXT_PRIO, "SGE too many priority egress contexts", -1, 0 }, { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, { 0 } }; v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); if (v) { dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", (unsigned long long)v); t4_write_reg(adapter, SGE_INT_CAUSE1, v); t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); } if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || v != 0) t4_fatal_err(adapter); } /* * CIM interrupt handler. 
*/ static void cim_intr_handler(struct adapter *adapter) { static const struct intr_info cim_intr_info[] = { { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, { OBQPARERR, "CIM OBQ parity error", -1, 1 }, { IBQPARERR, "CIM IBQ parity error", -1, 1 }, { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, { 0 } }; static const struct intr_info cim_upintr_info[] = { { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, { ILLWRINT, "CIM illegal write", -1, 1 }, { ILLRDINT, "CIM illegal read", -1, 1 }, { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, { 
TIMEOUTINT , "CIM PIF timeout", -1, 1 }, { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, { 0 } }; int fat; fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, cim_intr_info) + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, cim_upintr_info); if (fat) t4_fatal_err(adapter); } /* * ULP RX interrupt handler. */ static void ulprx_intr_handler(struct adapter *adapter) { static const struct intr_info ulprx_intr_info[] = { { 0x1800000, "ULPRX context error", -1, 1 }, { 0x7fffff, "ULPRX parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) t4_fatal_err(adapter); } /* * ULP TX interrupt handler. */ static void ulptx_intr_handler(struct adapter *adapter) { static const struct intr_info ulptx_intr_info[] = { { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 0 }, { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 0 }, { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 0 }, { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 0 }, { 0xfffffff, "ULPTX parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) t4_fatal_err(adapter); } /* * PM TX interrupt handler. */ static void pmtx_intr_handler(struct adapter *adapter) { static const struct intr_info pmtx_intr_info[] = { { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, { 0 } }; if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) t4_fatal_err(adapter); } /* * PM RX interrupt handler. 
*/ static void pmrx_intr_handler(struct adapter *adapter) { static const struct intr_info pmrx_intr_info[] = { { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, { 0 } }; if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) t4_fatal_err(adapter); } /* * CPL switch interrupt handler. */ static void cplsw_intr_handler(struct adapter *adapter) { static const struct intr_info cplsw_intr_info[] = { { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) t4_fatal_err(adapter); } /* * LE interrupt handler. */ static void le_intr_handler(struct adapter *adap) { static const struct intr_info le_intr_info[] = { { LIPMISS, "LE LIP miss", -1, 0 }, { LIP0, "LE 0 LIP error", -1, 0 }, { PARITYERR, "LE parity error", -1, 1 }, { UNKNOWNCMD, "LE unknown command", -1, 1 }, { REQQPARERR, "LE request queue parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) t4_fatal_err(adap); } /* * MPS interrupt handler. 
*/ static void mps_intr_handler(struct adapter *adapter) { static const struct intr_info mps_rx_intr_info[] = { { 0xffffff, "MPS Rx parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_tx_intr_info[] = { { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, { BUBBLE, "MPS Tx underflow", -1, 1 }, { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, { FRMERR, "MPS Tx framing error", -1, 1 }, { 0 } }; static const struct intr_info mps_trc_intr_info[] = { { FILTMEM, "MPS TRC filter parity error", -1, 1 }, { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, { MISCPERR, "MPS TRC misc parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_stat_sram_intr_info[] = { { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_stat_tx_intr_info[] = { { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_stat_rx_intr_info[] = { { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, { 0 } }; static const struct intr_info mps_cls_intr_info[] = { { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, { 0 } }; int fat; fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, mps_rx_intr_info) + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, mps_tx_intr_info) + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, mps_trc_intr_info) + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, mps_stat_sram_intr_info) + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, mps_stat_tx_intr_info) + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, mps_stat_rx_intr_info) + t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, mps_cls_intr_info); t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT 
| TRCINT | RXINT | TXINT | STATINT); t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ if (fat) t4_fatal_err(adapter); } #define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) /* * EDC/MC interrupt handler. */ static void mem_intr_handler(struct adapter *adapter, int idx) { static const char name[3][5] = { "EDC0", "EDC1", "MC" }; unsigned int addr, cnt_addr, v; if (idx <= MEM_EDC1) { addr = EDC_REG(EDC_INT_CAUSE, idx); cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); } else { addr = MC_INT_CAUSE; cnt_addr = MC_ECC_STATUS; } v = t4_read_reg(adapter, addr) & MEM_INT_MASK; if (v & PERR_INT_CAUSE) dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", name[idx]); if (v & ECC_CE_INT_CAUSE) { u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); if (printk_ratelimit()) dev_warn(adapter->pdev_dev, "%u %s correctable ECC data error%s\n", cnt, name[idx], cnt > 1 ? "s" : ""); } if (v & ECC_UE_INT_CAUSE) dev_alert(adapter->pdev_dev, "%s uncorrectable ECC data error\n", name[idx]); t4_write_reg(adapter, addr, v); if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) t4_fatal_err(adapter); } /* * MA interrupt handler. */ static void ma_intr_handler(struct adapter *adap) { u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); if (status & MEM_PERR_INT_CAUSE) dev_alert(adap->pdev_dev, "MA parity error, parity status %#x\n", t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); if (status & MEM_WRAP_INT_CAUSE) { v = t4_read_reg(adap, MA_INT_WRAP_STATUS); dev_alert(adap->pdev_dev, "MA address wrap-around error by " "client %u to address %#x\n", MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4); } t4_write_reg(adap, MA_INT_CAUSE, status); t4_fatal_err(adap); } /* * SMB interrupt handler. 
*/ static void smb_intr_handler(struct adapter *adap) { static const struct intr_info smb_intr_info[] = { { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) t4_fatal_err(adap); } /* * NC-SI interrupt handler. */ static void ncsi_intr_handler(struct adapter *adap) { static const struct intr_info ncsi_intr_info[] = { { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) t4_fatal_err(adap); } /* * XGMAC interrupt handler. */ static void xgmac_intr_handler(struct adapter *adap, int port) { u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; if (!v) return; if (v & TXFIFO_PRTY_ERR) dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", port); if (v & RXFIFO_PRTY_ERR) dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", port); t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); t4_fatal_err(adap); } /* * PL interrupt handler. */ static void pl_intr_handler(struct adapter *adap) { static const struct intr_info pl_intr_info[] = { { FATALPERR, "T4 fatal parity error", -1, 1 }, { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, { 0 } }; if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) t4_fatal_err(adap); } #define PF_INTR_MASK (PFSW) #define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ CPL_SWITCH | SGE | ULP_TX) /** * t4_slow_intr_handler - control path interrupt handler * @adapter: the adapter * * T4 interrupt handler for non-data global interrupt events, e.g., errors. 
* The designation 'slow' is because it involves register reads, while * data interrupts typically don't involve any MMIOs. */ int t4_slow_intr_handler(struct adapter *adapter) { u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); if (!(cause & GLBL_INTR_MASK)) return 0; if (cause & CIM) cim_intr_handler(adapter); if (cause & MPS) mps_intr_handler(adapter); if (cause & NCSI) ncsi_intr_handler(adapter); if (cause & PL) pl_intr_handler(adapter); if (cause & SMB) smb_intr_handler(adapter); if (cause & XGMAC0) xgmac_intr_handler(adapter, 0); if (cause & XGMAC1) xgmac_intr_handler(adapter, 1); if (cause & XGMAC_KR0) xgmac_intr_handler(adapter, 2); if (cause & XGMAC_KR1) xgmac_intr_handler(adapter, 3); if (cause & PCIE) pcie_intr_handler(adapter); if (cause & MC) mem_intr_handler(adapter, MEM_MC); if (cause & EDC0) mem_intr_handler(adapter, MEM_EDC0); if (cause & EDC1) mem_intr_handler(adapter, MEM_EDC1); if (cause & LE) le_intr_handler(adapter); if (cause & TP) tp_intr_handler(adapter); if (cause & MA) ma_intr_handler(adapter); if (cause & PM_TX) pmtx_intr_handler(adapter); if (cause & PM_RX) pmrx_intr_handler(adapter); if (cause & ULP_RX) ulprx_intr_handler(adapter); if (cause & CPL_SWITCH) cplsw_intr_handler(adapter); if (cause & SGE) sge_intr_handler(adapter); if (cause & ULP_TX) ulptx_intr_handler(adapter); /* Clear the interrupts just processed for which we are the master. */ t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ return 1; } /** * t4_intr_enable - enable interrupts * @adapter: the adapter whose interrupts should be enabled * * Enable PF-specific interrupts for the calling function and the top-level * interrupt concentrator for global interrupts. Interrupts are already * enabled at each module, here we just enable the roots of the interrupt * hierarchies. * * Note: this function should be called only when the driver manages * non PF-specific interrupts from the various HW modules. 
Only one PCI * function at a time should be doing this. */ void t4_intr_enable(struct adapter *adapter) { u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | EGRESS_SIZE_ERR); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); } /** * t4_intr_disable - disable interrupts * @adapter: the adapter whose interrupts should be disabled * * Disable interrupts. We only disable the top-level interrupt * concentrators. The caller must be a PCI function managing global * interrupts. */ void t4_intr_disable(struct adapter *adapter) { u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); } /** * hash_mac_addr - return the hash value of a MAC address * @addr: the 48-bit Ethernet MAC address * * Hashes a MAC address according to the hash function used by HW inexact * (hash) address matching. */ static int hash_mac_addr(const u8 *addr) { u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; a ^= b; a ^= (a >> 12); a ^= (a >> 6); return a & 0x3f; } /** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter * @mbox: mbox to use for the FW command * @viid: virtual interface whose RSS subtable is to be written * @start: start entry in the table to write * @n: how many table entries to write * @rspq: values for the response queue lookup table * @nrspq: number of values in @rspq * * Programs the selected part of the VI's RSS mapping table with the * provided values. 
If @nrspq < @n the supplied values are used repeatedly * until the full table range is populated. * * The caller must ensure the values in @rspq are in the range allowed for * @viid. */ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq) { int ret; const u16 *rsp = rspq; const u16 *rsp_end = rspq + nrspq; struct fw_rss_ind_tbl_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_RSS_IND_TBL_CMD_VIID(viid)); cmd.retval_len16 = htonl(FW_LEN16(cmd)); /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ while (n > 0) { int nq = min(n, 32); __be32 *qp = &cmd.iq0_to_iq2; cmd.niqid = htons(nq); cmd.startidx = htons(start); start += nq; n -= nq; while (nq > 0) { unsigned int v; v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); if (++rsp >= rsp_end) rsp = rspq; v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); if (++rsp >= rsp_end) rsp = rspq; v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); if (++rsp >= rsp_end) rsp = rspq; *qp++ = htonl(v); nq -= 3; } ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); if (ret) return ret; } return 0; } /** * t4_config_glbl_rss - configure the global RSS mode * @adapter: the adapter * @mbox: mbox to use for the FW command * @mode: global RSS mode * @flags: mode-specific flags * * Sets the global RSS mode. 
*/ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, unsigned int flags) { struct fw_rss_glb_config_cmd c; memset(&c, 0, sizeof(c)); c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE); c.retval_len16 = htonl(FW_LEN16(c)); if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { c.u.basicvirtual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); } else return -EINVAL; return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); } /** * t4_tp_get_tcp_stats - read TP's TCP MIB counters * @adap: the adapter * @v4: holds the TCP/IP counter values * @v6: holds the TCP/IPv6 counter values * * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. * Either @v4 or @v6 may be %NULL to skip the corresponding stats. */ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6) { u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; #define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) #define STAT(x) val[STAT_IDX(x)] #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) if (v4) { t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); v4->tcpOutRsts = STAT(OUT_RST); v4->tcpInSegs = STAT64(IN_SEG); v4->tcpOutSegs = STAT64(OUT_SEG); v4->tcpRetransSegs = STAT64(RXT_SEG); } if (v6) { t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); v6->tcpOutRsts = STAT(OUT_RST); v6->tcpInSegs = STAT64(IN_SEG); v6->tcpOutSegs = STAT64(OUT_SEG); v6->tcpRetransSegs = STAT64(RXT_SEG); } #undef STAT64 #undef STAT #undef STAT_IDX } /** * t4_read_mtu_tbl - returns the values in the HW path MTU table * @adap: the adapter * @mtus: where to store the MTU values * @mtu_log: where to store the MTU base-2 log (may be %NULL) * * Reads the 
HW path MTU table. */ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) { u32 v; int i; for (i = 0; i < NMTUS; ++i) { t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(0xff) | MTUVALUE(i)); v = t4_read_reg(adap, TP_MTU_TABLE); mtus[i] = MTUVALUE_GET(v); if (mtu_log) mtu_log[i] = MTUWIDTH_GET(v); } } /** * init_cong_ctrl - initialize congestion control parameters * @a: the alpha values for congestion control * @b: the beta values for congestion control * * Initialize the congestion control parameters. */ static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) { a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; a[9] = 2; a[10] = 3; a[11] = 4; a[12] = 5; a[13] = 6; a[14] = 7; a[15] = 8; a[16] = 9; a[17] = 10; a[18] = 14; a[19] = 17; a[20] = 21; a[21] = 25; a[22] = 30; a[23] = 35; a[24] = 45; a[25] = 60; a[26] = 80; a[27] = 100; a[28] = 200; a[29] = 300; a[30] = 400; a[31] = 500; b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; b[9] = b[10] = 1; b[11] = b[12] = 2; b[13] = b[14] = b[15] = b[16] = 3; b[17] = b[18] = b[19] = b[20] = b[21] = 4; b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; b[28] = b[29] = 6; b[30] = b[31] = 7; } /* The minimum additive increment value for the congestion control table */ #define CC_MIN_INCR 2U /** * t4_load_mtus - write the MTU and congestion control HW tables * @adap: the adapter * @mtus: the values for the MTU table * @alpha: the values for the congestion control alpha parameter * @beta: the values for the congestion control beta parameter * * Write the HW MTU table with the supplied MTUs and the high-speed * congestion control table with the supplied alpha, beta, and MTUs. * We write the two tables together because the additive increments * depend on the MTUs. 
*/ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta) { static const unsigned int avg_pkts[NCCTRL_WIN] = { 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376 }; unsigned int i, w; for (i = 0; i < NMTUS; ++i) { unsigned int mtu = mtus[i]; unsigned int log2 = fls(mtu); if (!(mtu & ((1 << log2) >> 2))) /* round */ log2--; t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | MTUWIDTH(log2) | MTUVALUE(mtu)); for (w = 0; w < NCCTRL_WIN; ++w) { unsigned int inc; inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], CC_MIN_INCR); t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | (w << 16) | (beta[w] << 13) | inc); } } } /** * get_mps_bg_map - return the buffer groups associated with a port * @adap: the adapter * @idx: the port index * * Returns a bitmap indicating which MPS buffer groups are associated * with the given port. Bit i is set if buffer group i is used by the * port. */ static unsigned int get_mps_bg_map(struct adapter *adap, int idx) { u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); if (n == 0) return idx == 0 ? 0xf : 0; if (n == 1) return idx < 2 ? (3 << (2 * idx)) : 0; return 1 << idx; } /** * t4_get_port_stats - collect port statistics * @adap: the adapter * @idx: the port index * @p: the stats structure to fill * * Collect statistics related to the given port from HW. 
*/ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) { u32 bgmap = get_mps_bg_map(adap, idx); #define GET_STAT(name) \ t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) p->tx_octets = GET_STAT(TX_PORT_BYTES); p->tx_frames = GET_STAT(TX_PORT_FRAMES); p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); p->tx_error_frames = GET_STAT(TX_PORT_ERROR); p->tx_frames_64 = GET_STAT(TX_PORT_64B); p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); p->tx_drop = GET_STAT(TX_PORT_DROP); p->tx_pause = GET_STAT(TX_PORT_PAUSE); p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); p->rx_octets = GET_STAT(RX_PORT_BYTES); p->rx_frames = GET_STAT(RX_PORT_FRAMES); p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); p->rx_runt = GET_STAT(RX_PORT_LESS_64B); p->rx_frames_64 = GET_STAT(RX_PORT_64B); p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); p->rx_frames_512_1023 = 
GET_STAT(RX_PORT_512B_1023B); p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); p->rx_pause = GET_STAT(RX_PORT_PAUSE); p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; #undef GET_STAT #undef GET_STAT_COM } /** * t4_wol_magic_enable - enable/disable magic packet WoL * @adap: the adapter * @port: the physical port index * @addr: MAC address expected in magic packets, %NULL to disable * * Enables/disables magic packet wake-on-LAN for the selected port. */ void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr) { if (addr) { t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]); t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), (addr[0] << 8) | addr[1]); } t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, addr ? 
MAGICEN : 0); } /** * t4_wol_pat_enable - enable/disable pattern-based WoL * @adap: the adapter * @port: the physical port index * @map: bitmap of which HW pattern filters to set * @mask0: byte mask for bytes 0-63 of a packet * @mask1: byte mask for bytes 64-127 of a packet * @crc: Ethernet CRC for selected bytes * @enable: enable/disable switch * * Sets the pattern filters indicated in @map to mask out the bytes * specified in @mask0/@mask1 in received packets and compare the CRC of * the resulting packet against @crc. If @enable is %true pattern-based * WoL is enabled, otherwise disabled. */ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, u64 mask0, u64 mask1, unsigned int crc, bool enable) { int i; if (!enable) { t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), PATEN, 0); return 0; } if (map > 0xff) return -EINVAL; #define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); t4_write_reg(adap, EPIO_REG(DATA2), mask1); t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); for (i = 0; i < NWOL_PAT; i++, map >>= 1) { if (!(map & 1)) continue; /* write byte masks */ t4_write_reg(adap, EPIO_REG(DATA0), mask0); t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); t4_read_reg(adap, EPIO_REG(OP)); /* flush */ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) return -ETIMEDOUT; /* write CRC */ t4_write_reg(adap, EPIO_REG(DATA0), crc); t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); t4_read_reg(adap, EPIO_REG(OP)); /* flush */ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY) return -ETIMEDOUT; } #undef EPIO_REG t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); return 0; } #define INIT_CMD(var, cmd, rd_wr) do { \ (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \ FW_CMD_REQUEST | FW_CMD_##rd_wr); \ (var).retval_len16 = htonl(FW_LEN16(var)); \ } while (0) /** * t4_mdio_rd - read a PHY register through MDIO * @adap: the adapter * @mbox: mailbox to use for the FW 
command * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to read * @valp: where to store the value * * Issues a FW command through the given mailbox to read a PHY register. */ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, u16 *valp) { int ret; struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | FW_LDST_CMD_MMD(mmd)); c.u.mdio.raddr = htons(reg); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) *valp = ntohs(c.u.mdio.rval); return ret; } /** * t4_mdio_wr - write a PHY register through MDIO * @adap: the adapter * @mbox: mailbox to use for the FW command * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to write * @valp: value to write * * Issues a FW command through the given mailbox to write a PHY register. */ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, u16 val) { struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | FW_LDST_CMD_MMD(mmd)); c.u.mdio.raddr = htons(reg); c.u.mdio.rval = htons(val); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_hello - establish communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @evt_mbox: mailbox to receive async FW events * @master: specifies the caller's willingness to be the device master * @state: returns the current device state * * Issues a command to establish communication with FW. 
*/ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state) { int ret; struct fw_hello_cmd c; INIT_CMD(c, HELLO, WRITE); c.err_to_mbasyncnot = htonl( FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0 && state) { u32 v = ntohl(c.err_to_mbasyncnot); if (v & FW_HELLO_CMD_INIT) *state = DEV_STATE_INIT; else if (v & FW_HELLO_CMD_ERR) *state = DEV_STATE_ERR; else *state = DEV_STATE_UNINIT; } return ret; } /** * t4_fw_bye - end communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to terminate communication with FW. */ int t4_fw_bye(struct adapter *adap, unsigned int mbox) { struct fw_bye_cmd c; INIT_CMD(c, BYE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_init_cmd - ask FW to initialize the device * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to FW to partially initialize the device. This * performs initialization that generally doesn't depend on user input. */ int t4_early_init(struct adapter *adap, unsigned int mbox) { struct fw_initialize_cmd c; INIT_CMD(c, INITIALIZE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_reset - issue a reset to FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @reset: specifies the type of reset to perform * * Issues a reset command of the specified type to FW. 
*/ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) { struct fw_reset_cmd c; INIT_CMD(c, RESET, WRITE); c.val = htonl(reset); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_query_params - query FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF * @vf: the VF * @nparams: the number of parameters * @params: the parameter names * @val: the parameter values * * Reads the value of FW or device parameters. Up to 7 parameters can be * queried at once. */ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val) { int i, ret; struct fw_params_cmd c; __be32 *p = &c.param[0].mnem; if (nparams > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | FW_PARAMS_CMD_VFN(vf)); c.retval_len16 = htonl(FW_LEN16(c)); for (i = 0; i < nparams; i++, p += 2) *p = htonl(*params++); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) *val++ = ntohl(*p); return ret; } /** * t4_set_params - sets FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF * @vf: the VF * @nparams: the number of parameters * @params: the parameter names * @val: the parameter values * * Sets the value of FW or device parameters. Up to 7 parameters can be * specified at once. 
*/ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val) { struct fw_params_cmd c; __be32 *p = &c.param[0].mnem; if (nparams > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | FW_PARAMS_CMD_VFN(vf)); c.retval_len16 = htonl(FW_LEN16(c)); while (nparams--) { *p++ = htonl(*params++); *p++ = htonl(*val++); } return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_cfg_pfvf - configure PF/VF resource limits * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF being configured * @vf: the VF being configured * @txq: the max number of egress queues * @txq_eth_ctrl: the max number of egress Ethernet or control queues * @rxqi: the max number of interrupt-capable ingress queues * @rxq: the max number of interruptless ingress queues * @tc: the PCI traffic class * @vi: the max number of virtual interfaces * @cmask: the channel access rights mask for the PF/VF * @pmask: the port access rights mask for the PF/VF * @nexact: the maximum number of exact MPS filters * @rcaps: read capabilities * @wxcaps: write/execute capabilities * * Configures resource limits and capabilities for a physical or virtual * function. 
*/ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi, unsigned int cmask, unsigned int pmask, unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) { struct fw_pfvf_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | FW_PFVF_CMD_VFN(vf)); c.retval_len16 = htonl(FW_LEN16(c)); c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | FW_PFVF_CMD_NIQ(rxq)); c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | FW_PFVF_CMD_PMASK(pmask) | FW_PFVF_CMD_NEQ(txq)); c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | FW_PFVF_CMD_NEXACTF(nexact)); c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | FW_PFVF_CMD_WX_CAPS(wxcaps) | FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_alloc_vi - allocate a virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @port: physical port associated with the VI * @pf: the PF owning the VI * @vf: the VF owning the VI * @nmac: number of MAC addresses needed (1 to 5) * @mac: the MAC addresses of the VI * @rss_size: size of RSS table slice associated with this VI * * Allocates a virtual interface for the given physical port. If @mac is * not %NULL it contains the MAC addresses of the VI as assigned by FW. * @mac should be large enough to hold @nmac Ethernet addresses, they are * stored consecutively so the space needed is @nmac * 6 bytes. * Returns a negative error number or the non-negative VI id. 
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
			    FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	/* FW encodes the MAC address count as (count - 1); per the function
	 * comment above, @nmac is 1..5. */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/* First address comes back in c.mac; the remaining ones in
		 * the nmac0..nmac3 fields.  The switch below deliberately
		 * falls through so that starting at the highest requested
		 * index copies every remaining 6-byte address. */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	/* success: return the (non-negative) VI id assigned by FW */
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}

/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
*/ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok) { struct fw_vi_rxmode_cmd c; /* convert to FW values */ if (mtu < 0) mtu = FW_RXMODE_MTU_NO_CHG; if (promisc < 0) promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; if (all_multi < 0) all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; if (bcast < 0) bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; if (vlanex < 0) vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); c.retval_len16 = htonl(FW_LEN16(c)); c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | FW_VI_RXMODE_CMD_PROMISCEN(promisc) | FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } /** * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @free: if true any existing filters for this VI id are first removed * @naddr: the number of MAC addresses to allocate filters for (up to 7) * @addr: the MAC address(es) * @idx: where to store the index of each allocated filter * @hash: pointer to hash address filter bitmap * @sleep_ok: call is allowed to sleep * * Allocates an exact-match filter for each of the supplied addresses and * sets it to the corresponding address. If @idx is not %NULL it should * have at least @naddr entries, each of which will be set to the index of * the filter allocated for the corresponding MAC address. If a filter * could not be allocated for an address its index is set to 0xffff. * If @hash is not %NULL addresses that fail to allocate an exact filter * are hashed and update the hash filter bitmap pointed at by @hash. * * Returns a negative error number or the number of filters allocated. 
*/ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) { int i, ret; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p; if (naddr > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | FW_VI_MAC_CMD_VIID(viid)); c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | FW_CMD_LEN16((naddr + 2) / 2)); for (i = 0, p = c.u.exact; i < naddr; i++, p++) { p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); } ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); if (ret) return ret; for (i = 0, p = c.u.exact; i < naddr; i++, p++) { u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); if (idx) idx[i] = index >= NEXACT_MAC ? 0xffff : index; if (index < NEXACT_MAC) ret++; else if (hash) *hash |= (1ULL << hash_mac_addr(addr[i])); } return ret; } /** * t4_change_mac - modifies the exact-match filter for a MAC address * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @idx: index of existing filter for old value of MAC address, or -1 * @addr: the new MAC address value * @persist: whether a new MAC allocation should be persistent * @add_smt: if true also add the address to the HW SMT * * Modifies an exact-match filter and sets it to the new MAC address. * Note that in general it is not possible to modify the value of a given * filter so the generic way to modify an address filter is to free the one * being used by the old address value and allocate a new filter for the * new address value. @idx can be -1 if the address is a new addition. * * Returns a negative error number or the index of the filter with the new * MAC value. 
*/ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int idx, const u8 *addr, bool persist, bool add_smt) { int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; if (idx < 0) /* new allocation */ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | FW_VI_MAC_CMD_SMAC_RESULT(mode) | FW_VI_MAC_CMD_IDX(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); if (ret >= NEXACT_MAC) ret = -ENOMEM; } return ret; } /** * t4_set_addr_hash - program the MAC inexact-match hash filter * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @ucast: whether the hash filter should also match unicast addresses * @vec: the value to be written to the hash filter * @sleep_ok: call is allowed to sleep * * Sets the 64-bit inexact-match hash filter for a virtual interface. 
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	/*
	 * NOTE(review): this builds a FW_VI_MAC_CMD but places the VIID with
	 * FW_VI_ENABLE_CMD_VIID(), whereas the other FW_VI_MAC_CMD users in
	 * this file (t4_alloc_mac_filt, t4_change_mac) use
	 * FW_VI_MAC_CMD_VIID().  Presumably the two macros encode the field
	 * identically -- confirm against the firmware API definitions.
	 */
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_enable_vi - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_identify_port - identify a VI's port by blinking its LED
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@nblinks: how many times to blink LED at 2.5 Hz
 *
 *	Identifies a VI's port by blinking its LED.
*/ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int nblinks) { struct fw_vi_enable_cmd c; c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); c.blinkdur = htons(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_iq_free - free an ingress queue and its FLs * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queues * @vf: the VF owning the queues * @iqtype: the ingress queue type * @iqid: ingress queue id * @fl0id: FL0 queue id or 0xffff if no attached FL0 * @fl1id: FL1 queue id or 0xffff if no attached FL1 * * Frees an ingress queue and its associated FLs, if any. */ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id) { struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); c.iqid = htons(iqid); c.fl0id = htons(fl0id); c.fl1id = htons(fl1id); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_eth_eq_free - free an Ethernet egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees an Ethernet egress queue. 
*/ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_eth_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | FW_EQ_ETH_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_ctrl_eq_free - free a control egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees a control egress queue. */ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_ctrl_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | FW_EQ_CTRL_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_ofld_eq_free - free an offload egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees a control egress queue. 
*/ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_ofld_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | FW_EQ_OFLD_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_handle_fw_rpl - process a FW reply message * @adap: the adapter * @rpl: start of the FW message * * Processes a FW message, such as link state change messages. */ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) { u8 opcode = *(const u8 *)rpl; if (opcode == FW_PORT_CMD) { /* link/module state change message */ int speed = 0, fc = 0; const struct fw_port_cmd *p = (void *)rpl; int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); int port = adap->chan_map[chan]; struct port_info *pi = adap2pinfo(adap, port); struct link_config *lc = &pi->link_cfg; u32 stat = ntohl(p->u.info.lstatus_to_modtype); int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); if (stat & FW_PORT_CMD_RXPAUSE) fc |= PAUSE_RX; if (stat & FW_PORT_CMD_TXPAUSE) fc |= PAUSE_TX; if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) speed = SPEED_100; else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) speed = SPEED_1000; else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) speed = SPEED_10000; if (link_ok != lc->link_ok || speed != lc->speed || fc != lc->fc) { /* something changed */ lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; t4_os_link_changed(adap, port, link_ok); } if (mod != pi->mod_type) { pi->mod_type = mod; t4_os_portmod_changed(adap, port); } } return 0; } static void __devinit get_pci_mode(struct adapter *adapter, struct pci_params *p) { u16 val; u32 pcie_cap = pci_pcie_cap(adapter->pdev); if (pcie_cap) { pci_read_config_word(adapter->pdev, pcie_cap + 
PCI_EXP_LNKSTA, &val); p->speed = val & PCI_EXP_LNKSTA_CLS; p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; } } /** * init_link_config - initialize a link's SW state * @lc: structure holding the link state * @caps: link capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ static void __devinit init_link_config(struct link_config *lc, unsigned int caps) { lc->supported = caps; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; if (lc->supported & FW_PORT_CAP_ANEG) { lc->advertising = lc->supported & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->advertising = 0; lc->autoneg = AUTONEG_DISABLE; } } int t4_wait_dev_ready(struct adapter *adap) { if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) return 0; msleep(500); return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; } static int __devinit get_flash_params(struct adapter *adap) { int ret; u32 info; ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); if (!ret) ret = sf1_read(adap, 3, 0, 1, &info); t4_write_reg(adap, SF_OP, 0); /* unlock SF */ if (ret) return ret; if ((info & 0xff) != 0x20) /* not a Numonix flash */ return -EINVAL; info >>= 16; /* log2 of size */ if (info >= 0x14 && info < 0x18) adap->params.sf_nsec = 1 << (info - 16); else if (info == 0x18) adap->params.sf_nsec = 64; else return -EINVAL; adap->params.sf_size = 1 << info; adap->params.sf_fw_start = t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; return 0; } /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter * @reset: if true perform a HW reset * * Initialize adapter SW state for the various HW modules, set initial * values for some adapter tunables, take PHYs out of reset, and * initialize the MDIO interface. 
*/ int __devinit t4_prep_adapter(struct adapter *adapter) { int ret; ret = t4_wait_dev_ready(adapter); if (ret < 0) return ret; get_pci_mode(adapter, &adapter->params.pci); adapter->params.rev = t4_read_reg(adapter, PL_REV); ret = get_flash_params(adapter); if (ret < 0) { dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret); return ret; } ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* * Default port for debugging in case we can't reach FW. */ adapter->params.nports = 1; adapter->params.portvec = 1; return 0; } int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) { u8 addr[6]; int ret, i, j = 0; struct fw_port_cmd c; struct fw_rss_vi_config_cmd rvc; memset(&c, 0, sizeof(c)); memset(&rvc, 0, sizeof(rvc)); for_each_port(adap, i) { unsigned int rss_size; struct port_info *p = adap2pinfo(adap, i); while ((adap->params.portvec & (1 << j)) == 0) j++; c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_PORT_CMD_PORTID(j)); c.action_to_len16 = htonl( FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(c)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret) return ret; ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); if (ret < 0) return ret; p->viid = ret; p->tx_chan = j; p->lport = j; p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); adap->port[i]->dev_id = j; ret = ntohl(c.u.info.lstatus_to_modtype); p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 
FW_PORT_CMD_MDIOADDR_GET(ret) : -1; p->port_type = FW_PORT_CMD_PTYPE_GET(ret); p->mod_type = FW_PORT_MOD_TYPE_NA; rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); rvc.retval_len16 = htonl(FW_LEN16(rvc)); ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); if (ret) return ret; p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); j++; } return 0; }
gpl-2.0
wrongway801/N900T_Kernel
drivers/isdn/hardware/avm/b1isa.c
9529
6000
/* $Id: b1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil Exp $ * * Module for AVM B1 ISA-card. * * Copyright 1999 by Carsten Paeth <calle@calle.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/capi.h> #include <linux/init.h> #include <linux/pci.h> #include <asm/io.h> #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> #include <linux/isdn/capilli.h> #include "avmcard.h" /* ------------------------------------------------------------- */ static char *revision = "$Revision: 1.1.2.3 $"; /* ------------------------------------------------------------- */ MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 ISA card"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /* ------------------------------------------------------------- */ static void b1isa_remove(struct pci_dev *pdev) { avmctrl_info *cinfo = pci_get_drvdata(pdev); avmcard *card; if (!cinfo) return; card = cinfo->card; b1_reset(card->port); b1_reset(card->port); detach_capi_ctr(&cinfo->capi_ctrl); free_irq(card->irq, card); release_region(card->port, AVMB1_PORTLEN); b1_free_card(card); } /* ------------------------------------------------------------- */ static char *b1isa_procinfo(struct capi_ctr *ctrl); static int b1isa_probe(struct pci_dev *pdev) { avmctrl_info *cinfo; avmcard *card; int retval; card = b1_alloc_card(1); if (!card) { printk(KERN_WARNING "b1isa: no memory.\n"); retval = -ENOMEM; goto err; } cinfo = card->ctrlinfo; card->port = pci_resource_start(pdev, 0); card->irq = pdev->irq; card->cardtype = avm_b1isa; sprintf(card->name, "b1isa-%x", card->port); if (card->port != 0x150 && card->port != 0x250 && card->port != 0x300 && card->port != 0x340) { printk(KERN_WARNING "b1isa: invalid port 
0x%x.\n", card->port); retval = -EINVAL; goto err_free; } if (b1_irq_table[card->irq & 0xf] == 0) { printk(KERN_WARNING "b1isa: irq %d not valid.\n", card->irq); retval = -EINVAL; goto err_free; } if (!request_region(card->port, AVMB1_PORTLEN, card->name)) { printk(KERN_WARNING "b1isa: ports 0x%03x-0x%03x in use.\n", card->port, card->port + AVMB1_PORTLEN); retval = -EBUSY; goto err_free; } retval = request_irq(card->irq, b1_interrupt, 0, card->name, card); if (retval) { printk(KERN_ERR "b1isa: unable to get IRQ %d.\n", card->irq); goto err_release_region; } b1_reset(card->port); if ((retval = b1_detect(card->port, card->cardtype)) != 0) { printk(KERN_NOTICE "b1isa: NO card at 0x%x (%d)\n", card->port, retval); retval = -ENODEV; goto err_free_irq; } b1_reset(card->port); b1_getrevision(card); cinfo->capi_ctrl.owner = THIS_MODULE; cinfo->capi_ctrl.driver_name = "b1isa"; cinfo->capi_ctrl.driverdata = cinfo; cinfo->capi_ctrl.register_appl = b1_register_appl; cinfo->capi_ctrl.release_appl = b1_release_appl; cinfo->capi_ctrl.send_message = b1_send_message; cinfo->capi_ctrl.load_firmware = b1_load_firmware; cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; cinfo->capi_ctrl.procinfo = b1isa_procinfo; cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops; strcpy(cinfo->capi_ctrl.name, card->name); retval = attach_capi_ctr(&cinfo->capi_ctrl); if (retval) { printk(KERN_ERR "b1isa: attach controller failed.\n"); goto err_free_irq; } printk(KERN_INFO "b1isa: AVM B1 ISA at i/o %#x, irq %d, revision %d\n", card->port, card->irq, card->revision); pci_set_drvdata(pdev, cinfo); return 0; err_free_irq: free_irq(card->irq, card); err_release_region: release_region(card->port, AVMB1_PORTLEN); err_free: b1_free_card(card); err: return retval; } static char *b1isa_procinfo(struct capi_ctr *ctrl) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); if (!cinfo) return ""; sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d", cinfo->cardname[0] ? cinfo->cardname : "-", cinfo->version[VER_DRIVER] ? 
cinfo->version[VER_DRIVER] : "-", cinfo->card ? cinfo->card->port : 0x0, cinfo->card ? cinfo->card->irq : 0, cinfo->card ? cinfo->card->revision : 0 ); return cinfo->infobuf; } /* ------------------------------------------------------------- */ #define MAX_CARDS 4 static struct pci_dev isa_dev[MAX_CARDS]; static int io[MAX_CARDS]; static int irq[MAX_CARDS]; module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); static int b1isa_add_card(struct capi_driver *driver, capicardparams *data) { int i; for (i = 0; i < MAX_CARDS; i++) { if (isa_dev[i].resource[0].start) continue; isa_dev[i].resource[0].start = data->port; isa_dev[i].irq = data->irq; if (b1isa_probe(&isa_dev[i]) == 0) return 0; } return -ENODEV; } static struct capi_driver capi_driver_b1isa = { .name = "b1isa", .revision = "1.0", .add_card = b1isa_add_card, }; static int __init b1isa_init(void) { char *p; char rev[32]; int i; if ((p = strchr(revision, ':')) != NULL && p[1]) { strlcpy(rev, p + 2, 32); if ((p = strchr(rev, '$')) != NULL && p > rev) *(p - 1) = 0; } else strcpy(rev, "1.0"); for (i = 0; i < MAX_CARDS; i++) { if (!io[i]) break; isa_dev[i].resource[0].start = io[i]; isa_dev[i].irq = irq[i]; if (b1isa_probe(&isa_dev[i]) != 0) return -ENODEV; } strlcpy(capi_driver_b1isa.revision, rev, 32); register_capi_driver(&capi_driver_b1isa); printk(KERN_INFO "b1isa: revision %s\n", rev); return 0; } static void __exit b1isa_exit(void) { int i; for (i = 0; i < MAX_CARDS; i++) { if (isa_dev[i].resource[0].start) b1isa_remove(&isa_dev[i]); } unregister_capi_driver(&capi_driver_b1isa); } module_init(b1isa_init); module_exit(b1isa_exit);
gpl-2.0
charles1018/kernel_sony_14.4.A.0.157
arch/x86/math-emu/reg_compare.c
13881
8289
/*---------------------------------------------------------------------------+ | reg_compare.c | | | | Compare two floating point registers | | | | Copyright (C) 1992,1993,1994,1997 | | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia | | E-mail billm@suburbia.net | | | | | +---------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------+ | compare() is the core FPU_REG comparison function | +---------------------------------------------------------------------------*/ #include "fpu_system.h" #include "exception.h" #include "fpu_emu.h" #include "control_w.h" #include "status_w.h" static int compare(FPU_REG const *b, int tagb) { int diff, exp0, expb; u_char st0_tag; FPU_REG *st0_ptr; FPU_REG x, y; u_char st0_sign, signb = getsign(b); st0_ptr = &st(0); st0_tag = FPU_gettag0(); st0_sign = getsign(st0_ptr); if (tagb == TAG_Special) tagb = FPU_Special(b); if (st0_tag == TAG_Special) st0_tag = FPU_Special(st0_ptr); if (((st0_tag != TAG_Valid) && (st0_tag != TW_Denormal)) || ((tagb != TAG_Valid) && (tagb != TW_Denormal))) { if (st0_tag == TAG_Zero) { if (tagb == TAG_Zero) return COMP_A_eq_B; if (tagb == TAG_Valid) return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B); if (tagb == TW_Denormal) return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B) | COMP_Denormal; } else if (tagb == TAG_Zero) { if (st0_tag == TAG_Valid) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B); if (st0_tag == TW_Denormal) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | COMP_Denormal; } if (st0_tag == TW_Infinity) { if ((tagb == TAG_Valid) || (tagb == TAG_Zero)) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B); else if (tagb == TW_Denormal) return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | COMP_Denormal; else if (tagb == TW_Infinity) { /* The 80486 book says that infinities can be equal! */ return (st0_sign == signb) ? 
COMP_A_eq_B : ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B); } /* Fall through to the NaN code */ } else if (tagb == TW_Infinity) { if ((st0_tag == TAG_Valid) || (st0_tag == TAG_Zero)) return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B); if (st0_tag == TW_Denormal) return ((signb == SIGN_POS) ? COMP_A_lt_B : COMP_A_gt_B) | COMP_Denormal; /* Fall through to the NaN code */ } /* The only possibility now should be that one of the arguments is a NaN */ if ((st0_tag == TW_NaN) || (tagb == TW_NaN)) { int signalling = 0, unsupported = 0; if (st0_tag == TW_NaN) { signalling = (st0_ptr->sigh & 0xc0000000) == 0x80000000; unsupported = !((exponent(st0_ptr) == EXP_OVER) && (st0_ptr-> sigh & 0x80000000)); } if (tagb == TW_NaN) { signalling |= (b->sigh & 0xc0000000) == 0x80000000; unsupported |= !((exponent(b) == EXP_OVER) && (b->sigh & 0x80000000)); } if (signalling || unsupported) return COMP_No_Comp | COMP_SNaN | COMP_NaN; else /* Neither is a signaling NaN */ return COMP_No_Comp | COMP_NaN; } EXCEPTION(EX_Invalid); } if (st0_sign != signb) { return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? COMP_Denormal : 0); } if ((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) { FPU_to_exp16(st0_ptr, &x); FPU_to_exp16(b, &y); st0_ptr = &x; b = &y; exp0 = exponent16(st0_ptr); expb = exponent16(b); } else { exp0 = exponent(st0_ptr); expb = exponent(b); } #ifdef PARANOID if (!(st0_ptr->sigh & 0x80000000)) EXCEPTION(EX_Invalid); if (!(b->sigh & 0x80000000)) EXCEPTION(EX_Invalid); #endif /* PARANOID */ diff = exp0 - expb; if (diff == 0) { diff = st0_ptr->sigh - b->sigh; /* Works only if ms bits are identical */ if (diff == 0) { diff = st0_ptr->sigl > b->sigl; if (diff == 0) diff = -(st0_ptr->sigl < b->sigl); } } if (diff > 0) { return ((st0_sign == SIGN_POS) ? COMP_A_gt_B : COMP_A_lt_B) | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? COMP_Denormal : 0); } if (diff < 0) { return ((st0_sign == SIGN_POS) ? 
COMP_A_lt_B : COMP_A_gt_B) | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? COMP_Denormal : 0); } return COMP_A_eq_B | (((st0_tag == TW_Denormal) || (tagb == TW_Denormal)) ? COMP_Denormal : 0); } /* This function requires that st(0) is not empty */ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag) { int f = 0, c; c = compare(loaded_data, loaded_tag); if (c & COMP_NaN) { EXCEPTION(EX_Invalid); f = SW_C3 | SW_C2 | SW_C0; } else switch (c & 7) { case COMP_A_lt_B: f = SW_C0; break; case COMP_A_eq_B: f = SW_C3; break; case COMP_A_gt_B: f = 0; break; case COMP_No_Comp: f = SW_C3 | SW_C2 | SW_C0; break; #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x121); f = SW_C3 | SW_C2 | SW_C0; break; #endif /* PARANOID */ } setcc(f); if (c & COMP_Denormal) { return denormal_operand() < 0; } return 0; } static int compare_st_st(int nr) { int f = 0, c; FPU_REG *st_ptr; if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) { setcc(SW_C3 | SW_C2 | SW_C0); /* Stack fault */ EXCEPTION(EX_StackUnder); return !(control_word & CW_Invalid); } st_ptr = &st(nr); c = compare(st_ptr, FPU_gettagi(nr)); if (c & COMP_NaN) { setcc(SW_C3 | SW_C2 | SW_C0); EXCEPTION(EX_Invalid); return !(control_word & CW_Invalid); } else switch (c & 7) { case COMP_A_lt_B: f = SW_C0; break; case COMP_A_eq_B: f = SW_C3; break; case COMP_A_gt_B: f = 0; break; case COMP_No_Comp: f = SW_C3 | SW_C2 | SW_C0; break; #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x122); f = SW_C3 | SW_C2 | SW_C0; break; #endif /* PARANOID */ } setcc(f); if (c & COMP_Denormal) { return denormal_operand() < 0; } return 0; } static int compare_u_st_st(int nr) { int f = 0, c; FPU_REG *st_ptr; if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) { setcc(SW_C3 | SW_C2 | SW_C0); /* Stack fault */ EXCEPTION(EX_StackUnder); return !(control_word & CW_Invalid); } st_ptr = &st(nr); c = compare(st_ptr, FPU_gettagi(nr)); if (c & COMP_NaN) { setcc(SW_C3 | SW_C2 | SW_C0); if (c & COMP_SNaN) { /* This is the only difference between un-ordered and ordinary 
comparisons */ EXCEPTION(EX_Invalid); return !(control_word & CW_Invalid); } return 0; } else switch (c & 7) { case COMP_A_lt_B: f = SW_C0; break; case COMP_A_eq_B: f = SW_C3; break; case COMP_A_gt_B: f = 0; break; case COMP_No_Comp: f = SW_C3 | SW_C2 | SW_C0; break; #ifdef PARANOID default: EXCEPTION(EX_INTERNAL | 0x123); f = SW_C3 | SW_C2 | SW_C0; break; #endif /* PARANOID */ } setcc(f); if (c & COMP_Denormal) { return denormal_operand() < 0; } return 0; } /*---------------------------------------------------------------------------*/ void fcom_st(void) { /* fcom st(i) */ compare_st_st(FPU_rm); } void fcompst(void) { /* fcomp st(i) */ if (!compare_st_st(FPU_rm)) FPU_pop(); } void fcompp(void) { /* fcompp */ if (FPU_rm != 1) { FPU_illegal(); return; } if (!compare_st_st(1)) poppop(); } void fucom_(void) { /* fucom st(i) */ compare_u_st_st(FPU_rm); } void fucomp(void) { /* fucomp st(i) */ if (!compare_u_st_st(FPU_rm)) FPU_pop(); } void fucompp(void) { /* fucompp */ if (FPU_rm == 1) { if (!compare_u_st_st(1)) poppop(); } else FPU_illegal(); }
gpl-2.0
JulianKemmerer/Drexel-CS370
arch/i386/mm/mmap.c
58
2316
/* * linux/arch/i386/mm/mmap.c * * flexible mmap layout support * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * Started by Ingo Molnar <mingo@elte.hu> */ #include <linux/personality.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/sched.h> /* * Top of mmap area (just below the process stack). * * Leave an at least ~128 MB hole. 
*/ #define MIN_GAP (128*1024*1024) #define MAX_GAP (TASK_SIZE/6*5) static inline unsigned long mmap_base(struct mm_struct *mm) { unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; unsigned long random_factor = 0; if (current->flags & PF_RANDOMIZE) random_factor = get_random_int() % (1024*1024); if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; return PAGE_ALIGN(TASK_SIZE - gap - random_factor); } /* * This function, called very early during the creation of a new * process VM image, sets up which VM layout function to use: */ void arch_pick_mmap_layout(struct mm_struct *mm) { /* * Fall back to the standard layout if the personality * bit is set, or if the expected stack growth is unlimited: */ if (sysctl_legacy_va_layout || (current->personality & ADDR_COMPAT_LAYOUT) || current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(mm); mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } }
gpl-2.0
AndroidRoot/android_kernel_asus_tf101
drivers/telephony/ixj_pcmcia.c
314
3568
#include "ixj-ver.h" #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> /* printk() */ #include <linux/fs.h> /* everything... */ #include <linux/errno.h> /* error codes */ #include <linux/slab.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "ixj.h" /* * PCMCIA service support for Quicknet cards */ typedef struct ixj_info_t { int ndev; struct ixj *port; } ixj_info_t; static void ixj_detach(struct pcmcia_device *p_dev); static int ixj_config(struct pcmcia_device * link); static void ixj_cs_release(struct pcmcia_device * link); static int ixj_probe(struct pcmcia_device *p_dev) { dev_dbg(&p_dev->dev, "ixj_attach()\n"); /* Create new ixj device */ p_dev->priv = kzalloc(sizeof(struct ixj_info_t), GFP_KERNEL); if (!p_dev->priv) { return -ENOMEM; } return ixj_config(p_dev); } static void ixj_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "ixj_detach\n"); ixj_cs_release(link); kfree(link->priv); } static void ixj_get_serial(struct pcmcia_device * link, IXJ * j) { char *str; int i, place; dev_dbg(&link->dev, "ixj_get_serial\n"); str = link->prod_id[0]; if (!str) goto failed; printk("%s", str); str = link->prod_id[1]; if (!str) goto failed; printk(" %s", str); str = link->prod_id[2]; if (!str) goto failed; place = 1; for (i = strlen(str) - 1; i >= 0; i--) { switch (str[i]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': j->serial += (str[i] - 48) * place; break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': j->serial += (str[i] - 55) * place; break; case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': j->serial += (str[i] - 87) * place; break; } place = place * 0x10; } str = link->prod_id[3]; if (!str) goto failed; printk(" version %s\n", str); failed: return; } static int ixj_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; 
p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 3; return pcmcia_request_io(p_dev); } static int ixj_config(struct pcmcia_device * link) { IXJ *j; ixj_info_t *info; info = link->priv; dev_dbg(&link->dev, "ixj_config\n"); link->config_flags = CONF_AUTO_SET_IO; if (pcmcia_loop_config(link, ixj_config_check, NULL)) goto failed; if (pcmcia_enable_device(link)) goto failed; /* * Register the card with the core. */ j = ixj_pcmcia_probe(link->resource[0]->start, link->resource[0]->start + 0x10); info->ndev = 1; ixj_get_serial(link, j); return 0; failed: ixj_cs_release(link); return -ENODEV; } static void ixj_cs_release(struct pcmcia_device *link) { ixj_info_t *info = link->priv; dev_dbg(&link->dev, "ixj_cs_release\n"); info->ndev = 0; pcmcia_disable_device(link); } static struct pcmcia_device_id ixj_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0257, 0x0600), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, ixj_ids); static struct pcmcia_driver ixj_driver = { .owner = THIS_MODULE, .name = "ixj_cs", .probe = ixj_probe, .remove = ixj_detach, .id_table = ixj_ids, }; static int __init ixj_pcmcia_init(void) { return pcmcia_register_driver(&ixj_driver); } static void ixj_pcmcia_exit(void) { pcmcia_unregister_driver(&ixj_driver); } module_init(ixj_pcmcia_init); module_exit(ixj_pcmcia_exit); MODULE_LICENSE("GPL");
gpl-2.0
TheGreatSega/Rush-Kernel
drivers/mtd/nand/ndfc.c
570
7540
/* * drivers/mtd/ndfc.c * * Overview: * Platform independent driver for NDFC (NanD Flash Controller) * integrated into EP440 cores * * Ported to an OF platform driver by Sean MacLennan * * The NDFC supports multiple chips, but this driver only supports a * single chip since I do not have access to any boards with * multiple chips. * * Author: Thomas Gleixner * * Copyright 2006 IBM * Copyright 2008 PIKA Technologies * Sean MacLennan <smaclennan@pikatech.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/ndfc.h> #include <linux/mtd/mtd.h> #include <linux/of_platform.h> #include <asm/io.h> struct ndfc_controller { struct of_device *ofdev; void __iomem *ndfcbase; struct mtd_info mtd; struct nand_chip chip; int chip_select; struct nand_hw_control ndfc_control; #ifdef CONFIG_MTD_PARTITIONS struct mtd_partition *parts; #endif }; static struct ndfc_controller ndfc_ctrl; static void ndfc_select_chip(struct mtd_info *mtd, int chip) { uint32_t ccr; struct ndfc_controller *ndfc = &ndfc_ctrl; ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); if (chip >= 0) { ccr &= ~NDFC_CCR_BS_MASK; ccr |= NDFC_CCR_BS(chip + ndfc->chip_select); } else ccr |= NDFC_CCR_RESET_CE; out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); } static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct ndfc_controller *ndfc = &ndfc_ctrl; if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD); else writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE); } static int ndfc_ready(struct mtd_info *mtd) { struct ndfc_controller *ndfc = &ndfc_ctrl; return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY; } static void 
ndfc_enable_hwecc(struct mtd_info *mtd, int mode) { uint32_t ccr; struct ndfc_controller *ndfc = &ndfc_ctrl; ccr = in_be32(ndfc->ndfcbase + NDFC_CCR); ccr |= NDFC_CCR_RESET_ECC; out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); wmb(); } static int ndfc_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct ndfc_controller *ndfc = &ndfc_ctrl; uint32_t ecc; uint8_t *p = (uint8_t *)&ecc; wmb(); ecc = in_be32(ndfc->ndfcbase + NDFC_ECC); /* The NDFC uses Smart Media (SMC) bytes order */ ecc_code[0] = p[1]; ecc_code[1] = p[2]; ecc_code[2] = p[3]; return 0; } /* * Speedups for buffer read/write/verify * * NDFC allows 32bit read/write of data. So we can speed up the buffer * functions. No further checking, as nand_base will always read/write * page aligned. */ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct ndfc_controller *ndfc = &ndfc_ctrl; uint32_t *p = (uint32_t *) buf; for(;len > 0; len -= 4) *p++ = in_be32(ndfc->ndfcbase + NDFC_DATA); } static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct ndfc_controller *ndfc = &ndfc_ctrl; uint32_t *p = (uint32_t *) buf; for(;len > 0; len -= 4) out_be32(ndfc->ndfcbase + NDFC_DATA, *p++); } static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct ndfc_controller *ndfc = &ndfc_ctrl; uint32_t *p = (uint32_t *) buf; for(;len > 0; len -= 4) if (*p++ != in_be32(ndfc->ndfcbase + NDFC_DATA)) return -EFAULT; return 0; } /* * Initialize chip structure */ static int ndfc_chip_init(struct ndfc_controller *ndfc, struct device_node *node) { #ifdef CONFIG_MTD_PARTITIONS #ifdef CONFIG_MTD_CMDLINE_PARTS static const char *part_types[] = { "cmdlinepart", NULL }; #else static const char *part_types[] = { NULL }; #endif #endif struct device_node *flash_np; struct nand_chip *chip = &ndfc->chip; int ret; chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA; chip->cmd_ctrl = ndfc_hwcontrol; 
chip->dev_ready = ndfc_ready; chip->select_chip = ndfc_select_chip; chip->chip_delay = 50; chip->controller = &ndfc->ndfc_control; chip->read_buf = ndfc_read_buf; chip->write_buf = ndfc_write_buf; chip->verify_buf = ndfc_verify_buf; chip->ecc.correct = nand_correct_data; chip->ecc.hwctl = ndfc_enable_hwecc; chip->ecc.calculate = ndfc_calculate_ecc; chip->ecc.mode = NAND_ECC_HW; chip->ecc.size = 256; chip->ecc.bytes = 3; ndfc->mtd.priv = chip; ndfc->mtd.owner = THIS_MODULE; flash_np = of_get_next_child(node, NULL); if (!flash_np) return -ENODEV; ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", dev_name(&ndfc->ofdev->dev), flash_np->name); if (!ndfc->mtd.name) { ret = -ENOMEM; goto err; } ret = nand_scan(&ndfc->mtd, 1); if (ret) goto err; #ifdef CONFIG_MTD_PARTITIONS ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); if (ret < 0) goto err; #ifdef CONFIG_MTD_OF_PARTS if (ret == 0) { ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np, &ndfc->parts); if (ret < 0) goto err; } #endif if (ret > 0) ret = add_mtd_partitions(&ndfc->mtd, ndfc->parts, ret); else #endif ret = add_mtd_device(&ndfc->mtd); err: of_node_put(flash_np); if (ret) kfree(ndfc->mtd.name); return ret; } static int __devinit ndfc_probe(struct of_device *ofdev, const struct of_device_id *match) { struct ndfc_controller *ndfc = &ndfc_ctrl; const u32 *reg; u32 ccr; int err, len; spin_lock_init(&ndfc->ndfc_control.lock); init_waitqueue_head(&ndfc->ndfc_control.wq); ndfc->ofdev = ofdev; dev_set_drvdata(&ofdev->dev, ndfc); /* Read the reg property to get the chip select */ reg = of_get_property(ofdev->node, "reg", &len); if (reg == NULL || len != 12) { dev_err(&ofdev->dev, "unable read reg property (%d)\n", len); return -ENOENT; } ndfc->chip_select = reg[0]; ndfc->ndfcbase = of_iomap(ofdev->node, 0); if (!ndfc->ndfcbase) { dev_err(&ofdev->dev, "failed to get memory\n"); return -EIO; } ccr = NDFC_CCR_BS(ndfc->chip_select); /* It is ok if ccr does not exist - just default to 0 */ reg = 
of_get_property(ofdev->node, "ccr", NULL); if (reg) ccr |= *reg; out_be32(ndfc->ndfcbase + NDFC_CCR, ccr); /* Set the bank settings if given */ reg = of_get_property(ofdev->node, "bank-settings", NULL); if (reg) { int offset = NDFC_BCFG0 + (ndfc->chip_select << 2); out_be32(ndfc->ndfcbase + offset, *reg); } err = ndfc_chip_init(ndfc, ofdev->node); if (err) { iounmap(ndfc->ndfcbase); return err; } return 0; } static int __devexit ndfc_remove(struct of_device *ofdev) { struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); nand_release(&ndfc->mtd); return 0; } static const struct of_device_id ndfc_match[] = { { .compatible = "ibm,ndfc", }, {} }; MODULE_DEVICE_TABLE(of, ndfc_match); static struct of_platform_driver ndfc_driver = { .driver = { .name = "ndfc", }, .match_table = ndfc_match, .probe = ndfc_probe, .remove = __devexit_p(ndfc_remove), }; static int __init ndfc_nand_init(void) { return of_register_platform_driver(&ndfc_driver); } static void __exit ndfc_nand_exit(void) { of_unregister_platform_driver(&ndfc_driver); } module_init(ndfc_nand_init); module_exit(ndfc_nand_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); MODULE_DESCRIPTION("OF Platform driver for NDFC");
gpl-2.0
TakisBeskos/u8160-2.6.32.x-kernel
fs/reiserfs/procfs.c
570
16159
/* -*- linux-c -*- */ /* fs/reiserfs/procfs.c */ /* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ /* proc info support a la one created by Sizif@Botik.RU for PGC */ #include <linux/module.h> #include <linux/time.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #include <linux/reiserfs_fs.h> #include <linux/reiserfs_fs_sb.h> #include <linux/init.h> #include <linux/proc_fs.h> #ifdef CONFIG_REISERFS_PROC_INFO /* * LOCKING: * * We rely on new Alexander Viro's super-block locking. * */ static int show_version(struct seq_file *m, struct super_block *sb) { char *format; if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) { format = "3.6"; } else if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_5)) { format = "3.5"; } else { format = "unknown"; } seq_printf(m, "%s format\twith checks %s\n", format, #if defined( CONFIG_REISERFS_CHECK ) "on" #else "off" #endif ); return 0; } int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, int count, int *eof, void *data) { *start = buffer; *eof = 1; return 0; } #define SF( x ) ( r -> x ) #define SFP( x ) SF( s_proc_info_data.x ) #define SFPL( x ) SFP( x[ level ] ) #define SFPF( x ) SFP( scan_bitmap.x ) #define SFPJ( x ) SFP( journal.x ) #define D2C( x ) le16_to_cpu( x ) #define D4C( x ) le32_to_cpu( x ) #define DF( x ) D2C( rs -> s_v1.x ) #define DFL( x ) D4C( rs -> s_v1.x ) #define objectid_map( s, rs ) (old_format_only (s) ? \ (__le32 *)((struct reiserfs_super_block_v1 *)rs + 1) : \ (__le32 *)(rs + 1)) #define MAP( i ) D4C( objectid_map( sb, rs )[ i ] ) #define DJF( x ) le32_to_cpu( rs -> x ) #define DJV( x ) le32_to_cpu( s_v1 -> x ) #define DJP( x ) le32_to_cpu( jp -> x ) #define JF( x ) ( r -> s_journal -> x ) static int show_super(struct seq_file *m, struct super_block *sb) { struct reiserfs_sb_info *r = REISERFS_SB(sb); seq_printf(m, "state: \t%s\n" "mount options: \t%s%s%s%s%s%s%s%s%s%s%s\n" "gen. 
counter: \t%i\n" "s_disk_reads: \t%i\n" "s_disk_writes: \t%i\n" "s_fix_nodes: \t%i\n" "s_do_balance: \t%i\n" "s_unneeded_left_neighbor: \t%i\n" "s_good_search_by_key_reada: \t%i\n" "s_bmaps: \t%i\n" "s_bmaps_without_search: \t%i\n" "s_direct2indirect: \t%i\n" "s_indirect2direct: \t%i\n" "\n" "max_hash_collisions: \t%i\n" "breads: \t%lu\n" "bread_misses: \t%lu\n" "search_by_key: \t%lu\n" "search_by_key_fs_changed: \t%lu\n" "search_by_key_restarted: \t%lu\n" "insert_item_restarted: \t%lu\n" "paste_into_item_restarted: \t%lu\n" "cut_from_item_restarted: \t%lu\n" "delete_solid_item_restarted: \t%lu\n" "delete_item_restarted: \t%lu\n" "leaked_oid: \t%lu\n" "leaves_removable: \t%lu\n", SF(s_mount_state) == REISERFS_VALID_FS ? "REISERFS_VALID_FS" : "REISERFS_ERROR_FS", reiserfs_r5_hash(sb) ? "FORCE_R5 " : "", reiserfs_rupasov_hash(sb) ? "FORCE_RUPASOV " : "", reiserfs_tea_hash(sb) ? "FORCE_TEA " : "", reiserfs_hash_detect(sb) ? "DETECT_HASH " : "", reiserfs_no_border(sb) ? "NO_BORDER " : "BORDER ", reiserfs_no_unhashed_relocation(sb) ? "NO_UNHASHED_RELOCATION " : "", reiserfs_hashed_relocation(sb) ? "UNHASHED_RELOCATION " : "", reiserfs_test4(sb) ? "TEST4 " : "", have_large_tails(sb) ? "TAILS " : have_small_tails(sb) ? "SMALL_TAILS " : "NO_TAILS ", replay_only(sb) ? "REPLAY_ONLY " : "", convert_reiserfs(sb) ? 
"CONV " : "", atomic_read(&r->s_generation_counter), SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), SF(s_do_balance), SF(s_unneeded_left_neighbor), SF(s_good_search_by_key_reada), SF(s_bmaps), SF(s_bmaps_without_search), SF(s_direct2indirect), SF(s_indirect2direct), SFP(max_hash_collisions), SFP(breads), SFP(bread_miss), SFP(search_by_key), SFP(search_by_key_fs_changed), SFP(search_by_key_restarted), SFP(insert_item_restarted), SFP(paste_into_item_restarted), SFP(cut_from_item_restarted), SFP(delete_solid_item_restarted), SFP(delete_item_restarted), SFP(leaked_oid), SFP(leaves_removable)); return 0; } static int show_per_level(struct seq_file *m, struct super_block *sb) { struct reiserfs_sb_info *r = REISERFS_SB(sb); int level; seq_printf(m, "level\t" " balances" " [sbk: reads" " fs_changed" " restarted]" " free space" " items" " can_remove" " lnum" " rnum" " lbytes" " rbytes" " get_neig" " get_neig_res" " need_l_neig" " need_r_neig" "\n"); for (level = 0; level < MAX_HEIGHT; ++level) { seq_printf(m, "%i\t" " %12lu" " %12lu" " %12lu" " %12lu" " %12lu" " %12lu" " %12lu" " %12li" " %12li" " %12li" " %12li" " %12lu" " %12lu" " %12lu" " %12lu" "\n", level, SFPL(balance_at), SFPL(sbk_read_at), SFPL(sbk_fs_changed), SFPL(sbk_restarted), SFPL(free_at), SFPL(items_at), SFPL(can_node_be_removed), SFPL(lnum), SFPL(rnum), SFPL(lbytes), SFPL(rbytes), SFPL(get_neighbors), SFPL(get_neighbors_restart), SFPL(need_l_neighbor), SFPL(need_r_neighbor) ); } return 0; } static int show_bitmap(struct seq_file *m, struct super_block *sb) { struct reiserfs_sb_info *r = REISERFS_SB(sb); seq_printf(m, "free_block: %lu\n" " scan_bitmap:" " wait" " bmap" " retry" " stolen" " journal_hint" "journal_nohint" "\n" " %14lu" " %14lu" " %14lu" " %14lu" " %14lu" " %14lu" " %14lu" "\n", SFP(free_block), SFPF(call), SFPF(wait), SFPF(bmap), SFPF(retry), SFPF(stolen), SFPF(in_journal_hint), SFPF(in_journal_nohint)); return 0; } static int show_on_disk_super(struct seq_file *m, struct super_block 
*sb) { struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); struct reiserfs_super_block *rs = sb_info->s_rs; int hash_code = DFL(s_hash_function_code); __u32 flags = DJF(s_flags); seq_printf(m, "block_count: \t%i\n" "free_blocks: \t%i\n" "root_block: \t%i\n" "blocksize: \t%i\n" "oid_maxsize: \t%i\n" "oid_cursize: \t%i\n" "umount_state: \t%i\n" "magic: \t%10.10s\n" "fs_state: \t%i\n" "hash: \t%s\n" "tree_height: \t%i\n" "bmap_nr: \t%i\n" "version: \t%i\n" "flags: \t%x[%s]\n" "reserved_for_journal: \t%i\n", DFL(s_block_count), DFL(s_free_blocks), DFL(s_root_block), DF(s_blocksize), DF(s_oid_maxsize), DF(s_oid_cursize), DF(s_umount_state), rs->s_v1.s_magic, DF(s_fs_state), hash_code == TEA_HASH ? "tea" : (hash_code == YURA_HASH) ? "rupasov" : (hash_code == R5_HASH) ? "r5" : (hash_code == UNSET_HASH) ? "unset" : "unknown", DF(s_tree_height), DF(s_bmap_nr), DF(s_version), flags, (flags & reiserfs_attrs_cleared) ? "attrs_cleared" : "", DF(s_reserved_for_journal)); return 0; } static int show_oidmap(struct seq_file *m, struct super_block *sb) { struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); struct reiserfs_super_block *rs = sb_info->s_rs; unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize); unsigned long total_used = 0; int i; for (i = 0; i < mapsize; ++i) { __u32 right; right = (i == mapsize - 1) ? MAX_KEY_OBJECTID : MAP(i + 1); seq_printf(m, "%s: [ %x .. %x )\n", (i & 1) ? 
"free" : "used", MAP(i), right); if (!(i & 1)) { total_used += right - MAP(i); } } #if defined( REISERFS_USE_OIDMAPF ) if (sb_info->oidmap.use_file && (sb_info->oidmap.mapf != NULL)) { loff_t size = sb_info->oidmap.mapf->f_path.dentry->d_inode->i_size; total_used += size / sizeof(reiserfs_oidinterval_d_t); } #endif seq_printf(m, "total: \t%i [%i/%i] used: %lu [exact]\n", mapsize, mapsize, le16_to_cpu(rs->s_v1.s_oid_maxsize), total_used); return 0; } static int show_journal(struct seq_file *m, struct super_block *sb) { struct reiserfs_sb_info *r = REISERFS_SB(sb); struct reiserfs_super_block *rs = r->s_rs; struct journal_params *jp = &rs->s_v1.s_journal; char b[BDEVNAME_SIZE]; seq_printf(m, /* on-disk fields */ "jp_journal_1st_block: \t%i\n" "jp_journal_dev: \t%s[%x]\n" "jp_journal_size: \t%i\n" "jp_journal_trans_max: \t%i\n" "jp_journal_magic: \t%i\n" "jp_journal_max_batch: \t%i\n" "jp_journal_max_commit_age: \t%i\n" "jp_journal_max_trans_age: \t%i\n" /* incore fields */ "j_1st_reserved_block: \t%i\n" "j_state: \t%li\n" "j_trans_id: \t%u\n" "j_mount_id: \t%lu\n" "j_start: \t%lu\n" "j_len: \t%lu\n" "j_len_alloc: \t%lu\n" "j_wcount: \t%i\n" "j_bcount: \t%lu\n" "j_first_unflushed_offset: \t%lu\n" "j_last_flush_trans_id: \t%u\n" "j_trans_start_time: \t%li\n" "j_list_bitmap_index: \t%i\n" "j_must_wait: \t%i\n" "j_next_full_flush: \t%i\n" "j_next_async_flush: \t%i\n" "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n" /* reiserfs_proc_info_data_t.journal fields */ "in_journal: \t%12lu\n" "in_journal_bitmap: \t%12lu\n" "in_journal_reusable: \t%12lu\n" "lock_journal: \t%12lu\n" "lock_journal_wait: \t%12lu\n" "journal_begin: \t%12lu\n" "journal_relock_writers: \t%12lu\n" "journal_relock_wcount: \t%12lu\n" "mark_dirty: \t%12lu\n" "mark_dirty_already: \t%12lu\n" "mark_dirty_notjournal: \t%12lu\n" "restore_prepared: \t%12lu\n" "prepare: \t%12lu\n" "prepare_retry: \t%12lu\n", DJP(jp_journal_1st_block), bdevname(SB_JOURNAL(sb)->j_dev_bd, b), DJP(jp_journal_dev), 
DJP(jp_journal_size), DJP(jp_journal_trans_max), DJP(jp_journal_magic), DJP(jp_journal_max_batch), SB_JOURNAL(sb)->j_max_commit_age, DJP(jp_journal_max_trans_age), JF(j_1st_reserved_block), JF(j_state), JF(j_trans_id), JF(j_mount_id), JF(j_start), JF(j_len), JF(j_len_alloc), atomic_read(&r->s_journal->j_wcount), JF(j_bcount), JF(j_first_unflushed_offset), JF(j_last_flush_trans_id), JF(j_trans_start_time), JF(j_list_bitmap_index), JF(j_must_wait), JF(j_next_full_flush), JF(j_next_async_flush), JF(j_cnode_used), JF(j_cnode_free), SFPJ(in_journal), SFPJ(in_journal_bitmap), SFPJ(in_journal_reusable), SFPJ(lock_journal), SFPJ(lock_journal_wait), SFPJ(journal_being), SFPJ(journal_relock_writers), SFPJ(journal_relock_wcount), SFPJ(mark_dirty), SFPJ(mark_dirty_already), SFPJ(mark_dirty_notjournal), SFPJ(restore_prepared), SFPJ(prepare), SFPJ(prepare_retry) ); return 0; } /* iterator */ static int test_sb(struct super_block *sb, void *data) { return data == sb; } static int set_sb(struct super_block *sb, void *data) { return -ENOENT; } static void *r_start(struct seq_file *m, loff_t * pos) { struct proc_dir_entry *de = m->private; struct super_block *s = de->parent->data; loff_t l = *pos; if (l) return NULL; if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, s))) return NULL; up_write(&s->s_umount); return s; } static void *r_next(struct seq_file *m, void *v, loff_t * pos) { ++*pos; if (v) deactivate_super(v); return NULL; } static void r_stop(struct seq_file *m, void *v) { if (v) deactivate_super(v); } static int r_show(struct seq_file *m, void *v) { struct proc_dir_entry *de = m->private; int (*show) (struct seq_file *, struct super_block *) = de->data; return show(m, v); } static const struct seq_operations r_ops = { .start = r_start, .next = r_next, .stop = r_stop, .show = r_show, }; static int r_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &r_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = PDE(inode); } return ret; } 
static const struct file_operations r_file_operations = { .open = r_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .owner = THIS_MODULE, }; static struct proc_dir_entry *proc_info_root = NULL; static const char proc_info_root_name[] = "fs/reiserfs"; static void add_file(struct super_block *sb, char *name, int (*func) (struct seq_file *, struct super_block *)) { proc_create_data(name, 0, REISERFS_SB(sb)->procdir, &r_file_operations, func); } int reiserfs_proc_info_init(struct super_block *sb) { char b[BDEVNAME_SIZE]; char *s; /* Some block devices use /'s */ strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE); s = strchr(b, '/'); if (s) *s = '!'; spin_lock_init(&__PINFO(sb).lock); REISERFS_SB(sb)->procdir = proc_mkdir(b, proc_info_root); if (REISERFS_SB(sb)->procdir) { REISERFS_SB(sb)->procdir->data = sb; add_file(sb, "version", show_version); add_file(sb, "super", show_super); add_file(sb, "per-level", show_per_level); add_file(sb, "bitmap", show_bitmap); add_file(sb, "on-disk-super", show_on_disk_super); add_file(sb, "oidmap", show_oidmap); add_file(sb, "journal", show_journal); return 0; } reiserfs_warning(sb, "cannot create /proc/%s/%s", proc_info_root_name, b); return 1; } int reiserfs_proc_info_done(struct super_block *sb) { struct proc_dir_entry *de = REISERFS_SB(sb)->procdir; char b[BDEVNAME_SIZE]; char *s; /* Some block devices use /'s */ strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE); s = strchr(b, '/'); if (s) *s = '!'; if (de) { remove_proc_entry("journal", de); remove_proc_entry("oidmap", de); remove_proc_entry("on-disk-super", de); remove_proc_entry("bitmap", de); remove_proc_entry("per-level", de); remove_proc_entry("super", de); remove_proc_entry("version", de); } spin_lock(&__PINFO(sb).lock); __PINFO(sb).exiting = 1; spin_unlock(&__PINFO(sb).lock); if (proc_info_root) { remove_proc_entry(b, proc_info_root); REISERFS_SB(sb)->procdir = NULL; } return 0; } struct proc_dir_entry *reiserfs_proc_register_global(char *name, read_proc_t 
* func) { return (proc_info_root) ? create_proc_read_entry(name, 0, proc_info_root, func, NULL) : NULL; } void reiserfs_proc_unregister_global(const char *name) { remove_proc_entry(name, proc_info_root); } int reiserfs_proc_info_global_init(void) { if (proc_info_root == NULL) { proc_info_root = proc_mkdir(proc_info_root_name, NULL); if (!proc_info_root) { reiserfs_warning(NULL, "cannot create /proc/%s", proc_info_root_name); return 1; } } return 0; } int reiserfs_proc_info_global_done(void) { if (proc_info_root != NULL) { proc_info_root = NULL; remove_proc_entry(proc_info_root_name, NULL); } return 0; } /* REISERFS_PROC_INFO */ #else int reiserfs_proc_info_init(struct super_block *sb) { return 0; } int reiserfs_proc_info_done(struct super_block *sb) { return 0; } struct proc_dir_entry *reiserfs_proc_register_global(char *name, read_proc_t * func) { return NULL; } void reiserfs_proc_unregister_global(const char *name) {; } int reiserfs_proc_info_global_init(void) { return 0; } int reiserfs_proc_info_global_done(void) { return 0; } int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, int count, int *eof, void *data) { return 0; } /* REISERFS_PROC_INFO */ #endif /* * Revision 1.1.8.2 2001/07/15 17:08:42 god * . use get_super() in procfs.c * . remove remove_save_link() from reiserfs_do_truncate() * * I accept terms and conditions stated in the Legal Agreement * (available at http://www.namesys.com/legalese.html) * * Revision 1.1.8.1 2001/07/11 16:48:50 god * proc info support * * I accept terms and conditions stated in the Legal Agreement * (available at http://www.namesys.com/legalese.html) * */ /* * Make Linus happy. * Local variables: * c-indentation-style: "K&R" * mode-name: "LC" * c-basic-offset: 8 * tab-width: 8 * End: */
gpl-2.0
Ankarrr/Linux-2.6.32.60-rts
drivers/net/phy/bcm63xx.c
570
3386
/* * Driver for Broadcom 63xx SOCs integrated PHYs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/phy.h> #define MII_BCM63XX_IR 0x1a /* interrupt register */ #define MII_BCM63XX_IR_EN 0x4000 /* global interrupt enable */ #define MII_BCM63XX_IR_DUPLEX 0x0800 /* duplex changed */ #define MII_BCM63XX_IR_SPEED 0x0400 /* speed changed */ #define MII_BCM63XX_IR_LINK 0x0200 /* link changed */ #define MII_BCM63XX_IR_GMASK 0x0100 /* global interrupt mask */ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver"); MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); MODULE_LICENSE("GPL"); static int bcm63xx_config_init(struct phy_device *phydev) { int reg, err; reg = phy_read(phydev, MII_BCM63XX_IR); if (reg < 0) return reg; /* Mask interrupts globally. */ reg |= MII_BCM63XX_IR_GMASK; err = phy_write(phydev, MII_BCM63XX_IR, reg); if (err < 0) return err; /* Unmask events we are interested in */ reg = ~(MII_BCM63XX_IR_DUPLEX | MII_BCM63XX_IR_SPEED | MII_BCM63XX_IR_LINK) | MII_BCM63XX_IR_EN; err = phy_write(phydev, MII_BCM63XX_IR, reg); if (err < 0) return err; return 0; } static int bcm63xx_ack_interrupt(struct phy_device *phydev) { int reg; /* Clear pending interrupts. 
*/ reg = phy_read(phydev, MII_BCM63XX_IR); if (reg < 0) return reg; return 0; } static int bcm63xx_config_intr(struct phy_device *phydev) { int reg, err; reg = phy_read(phydev, MII_BCM63XX_IR); if (reg < 0) return reg; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) reg &= ~MII_BCM63XX_IR_GMASK; else reg |= MII_BCM63XX_IR_GMASK; err = phy_write(phydev, MII_BCM63XX_IR, reg); return err; } static struct phy_driver bcm63xx_1_driver = { .phy_id = 0x00406000, .phy_id_mask = 0xfffffc00, .name = "Broadcom BCM63XX (1)", /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_INTERRUPT, .config_init = bcm63xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm63xx_ack_interrupt, .config_intr = bcm63xx_config_intr, .driver = { .owner = THIS_MODULE }, }; /* same phy as above, with just a different OUI */ static struct phy_driver bcm63xx_2_driver = { .phy_id = 0x002bdc00, .phy_id_mask = 0xfffffc00, .name = "Broadcom BCM63XX (2)", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_INTERRUPT, .config_init = bcm63xx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = bcm63xx_ack_interrupt, .config_intr = bcm63xx_config_intr, .driver = { .owner = THIS_MODULE }, }; static int __init bcm63xx_phy_init(void) { int ret; ret = phy_driver_register(&bcm63xx_1_driver); if (ret) goto out_63xx_1; ret = phy_driver_register(&bcm63xx_2_driver); if (ret) goto out_63xx_2; return ret; out_63xx_2: phy_driver_unregister(&bcm63xx_1_driver); out_63xx_1: return ret; } static void __exit bcm63xx_phy_exit(void) { phy_driver_unregister(&bcm63xx_1_driver); phy_driver_unregister(&bcm63xx_2_driver); } module_init(bcm63xx_phy_init); module_exit(bcm63xx_phy_exit);
gpl-2.0
Kra1o5/android_kernel_bq_rk3066
drivers/usb/host/ehci-pci.c
826
16049
/* * EHCI HCD (Host Controller Driver) PCI Bus Glue. * * Copyright (c) 2000-2004 by David Brownell * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef CONFIG_PCI #error "This file is PCI bus glue. CONFIG_PCI must be defined." #endif /* defined here to avoid adding to pci_ids.h for single instance use */ #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70 /*-------------------------------------------------------------------------*/ /* called after powerup, by probe or system-pm "wakeup" */ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) { int retval; /* we expect static quirk code to handle the "extended capabilities" * (currently just BIOS handoff) allowed starting with EHCI 0.96 */ /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */ retval = pci_set_mwi(pdev); if (!retval) ehci_dbg(ehci, "MWI active\n"); return 0; } /* called during probe() after chip reset completes */ static int ehci_pci_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); struct pci_dev *p_smbus; u8 rev; u32 temp; int retval; switch (pdev->vendor) { case PCI_VENDOR_ID_TOSHIBA_2: /* celleb's companion chip */ if (pdev->device == 0x01b5) { #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO ehci->big_endian_mmio = 1; #else ehci_warn(ehci, "unsupported big endian 
Toshiba quirk\n"); #endif } break; } ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); /* ehci_init() causes memory for DMA transfers to be * allocated. Thus, any vendor-specific workarounds based on * limiting the type of memory used for DMA transfers must * happen before ehci_init() is called. */ switch (pdev->vendor) { case PCI_VENDOR_ID_NVIDIA: /* NVidia reports that certain chips don't handle * QH, ITD, or SITD addresses above 2GB. (But TD, * data buffer, and periodic schedule are normal.) */ switch (pdev->device) { case 0x003c: /* MCP04 */ case 0x005b: /* CK804 */ case 0x00d8: /* CK8 */ case 0x00e8: /* CK8S */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(31)) < 0) ehci_warn(ehci, "can't enable NVidia " "workaround for >2GB RAM\n"); break; } break; } /* cache this readonly data; minimize chip reads */ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); retval = ehci_halt(ehci); if (retval) return retval; if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) || (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) { /* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may * read/write memory space which does not belong to it when * there is NULL pointer with T-bit set to 1 in the frame list * table. To avoid the issue, the frame list link pointer * should always contain a valid pointer to a inactive qh. 
*/ ehci->use_dummy_qh = 1; ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI " "dummy qh workaround\n"); } /* data structure init */ retval = ehci_init(hcd); if (retval) return retval; switch (pdev->vendor) { case PCI_VENDOR_ID_NEC: ehci->need_io_watchdog = 0; break; case PCI_VENDOR_ID_INTEL: ehci->need_io_watchdog = 0; ehci->fs_i_thresh = 1; if (pdev->device == 0x27cc) { ehci->broken_periodic = 1; ehci_info(ehci, "using broken periodic workaround\n"); } if (pdev->device == 0x0806 || pdev->device == 0x0811 || pdev->device == 0x0829) { ehci_info(ehci, "disable lpm for langwell/penwell\n"); ehci->has_lpm = 0; } if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) { hcd->has_tt = 1; tdi_reset(ehci); } break; case PCI_VENDOR_ID_TDI: if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { hcd->has_tt = 1; tdi_reset(ehci); } break; case PCI_VENDOR_ID_AMD: /* AMD PLL quirk */ if (usb_amd_find_chipset_info()) ehci->amd_pll_fix = 1; /* AMD8111 EHCI doesn't work, according to AMD errata */ if (pdev->device == 0x7463) { ehci_info(ehci, "ignoring AMD8111 (errata)\n"); retval = -EIO; goto done; } break; case PCI_VENDOR_ID_NVIDIA: switch (pdev->device) { /* Some NForce2 chips have problems with selective suspend; * fixed in newer silicon. */ case 0x0068: if (pdev->revision < 0xa4) ehci->no_selective_suspend = 1; break; /* MCP89 chips on the MacBookAir3,1 give EPROTO when * fetching device descriptors unless LPM is disabled. * There are also intermittent problems enumerating * devices with PPCD enabled. */ case 0x0d9d: ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89"); ehci->has_lpm = 0; ehci->has_ppcd = 0; ehci->command &= ~CMD_PPCEE; break; } break; case PCI_VENDOR_ID_VIA: if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) { u8 tmp; /* The VT6212 defaults to a 1 usec EHCI sleep time which * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes * that sleep time use the conventional 10 usec. 
*/ pci_read_config_byte(pdev, 0x4b, &tmp); if (tmp & 0x20) break; pci_write_config_byte(pdev, 0x4b, tmp | 0x20); } break; case PCI_VENDOR_ID_ATI: /* AMD PLL quirk */ if (usb_amd_find_chipset_info()) ehci->amd_pll_fix = 1; /* SB600 and old version of SB700 have a bug in EHCI controller, * which causes usb devices lose response in some cases. */ if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) { p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); if (!p_smbus) break; rev = p_smbus->revision; if ((pdev->device == 0x4386) || (rev == 0x3a) || (rev == 0x3b)) { u8 tmp; ehci_info(ehci, "applying AMD SB600/SB700 USB " "freeze workaround\n"); pci_read_config_byte(pdev, 0x53, &tmp); pci_write_config_byte(pdev, 0x53, tmp | (1<<3)); } pci_dev_put(p_smbus); } break; case PCI_VENDOR_ID_NETMOS: /* MosChip frame-index-register bug */ ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; } /* optional debug port, normally in the first BAR */ temp = pci_find_capability(pdev, 0x0a); if (temp) { pci_read_config_dword(pdev, temp, &temp); temp >>= 16; if ((temp & (3 << 13)) == (1 << 13)) { temp &= 0x1fff; ehci->debug = ehci_to_hcd(ehci)->regs + temp; temp = ehci_readl(ehci, &ehci->debug->control); ehci_info(ehci, "debug port %d%s\n", HCS_DEBUG_PORT(ehci->hcs_params), (temp & DBGP_ENABLED) ? 
" IN USE" : ""); if (!(temp & DBGP_ENABLED)) ehci->debug = NULL; } } ehci_reset(ehci); /* at least the Genesys GL880S needs fixup here */ temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); temp &= 0x0f; if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) { ehci_dbg(ehci, "bogus port configuration: " "cc=%d x pcc=%d < ports=%d\n", HCS_N_CC(ehci->hcs_params), HCS_N_PCC(ehci->hcs_params), HCS_N_PORTS(ehci->hcs_params)); switch (pdev->vendor) { case 0x17a0: /* GENESYS */ /* GL880S: should be PORTS=2 */ temp |= (ehci->hcs_params & ~0xf); ehci->hcs_params = temp; break; case PCI_VENDOR_ID_NVIDIA: /* NF4: should be PCC=10 */ break; } } /* Serial Bus Release Number is at PCI 0x60 offset */ pci_read_config_byte(pdev, 0x60, &ehci->sbrn); /* Keep this around for a while just in case some EHCI * implementation uses legacy PCI PM support. This test * can be removed on 17 Dec 2009 if the dev_warn() hasn't * been triggered by then. */ if (!device_can_wakeup(&pdev->dev)) { u16 port_wake; pci_read_config_word(pdev, 0x62, &port_wake); if (port_wake & 0x0001) { dev_warn(&pdev->dev, "Enabling legacy PCI PM\n"); device_set_wakeup_capable(&pdev->dev, 1); } } #ifdef CONFIG_USB_SUSPEND /* REVISIT: the controller works fine for wakeup iff the root hub * itself is "globally" suspended, but usbcore currently doesn't * understand such things. * * System suspend currently expects to be able to suspend the entire * device tree, device-at-a-time. If we failed selective suspend * reports, system suspend would fail; so the root hub code must claim * success. That's lying to usbcore, and it matters for runtime * PM scenarios with selective suspend and remote wakeup... 
*/ if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) ehci_warn(ehci, "selective suspend/wakeup unavailable\n"); #endif ehci_port_power(ehci, 1); retval = ehci_pci_reinit(ehci, pdev); done: return retval; } /*-------------------------------------------------------------------------*/ #ifdef CONFIG_PM /* suspend/resume, section 4.3 */ /* These routines rely on the PCI bus glue * to handle powerdown and wakeup, and currently also on * transceivers that don't need any software attention to set up * the right sort of wakeup. * Also they depend on separate root hub suspend/resume. */ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); unsigned long flags; int rc = 0; if (time_before(jiffies, ehci->next_statechange)) msleep(10); /* Root hub was already suspended. Disable irq emission and * mark HW unaccessible. The PM and USB cores make sure that * the root hub is either suspended or stopped. */ ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); spin_lock_irqsave (&ehci->lock, flags); ehci_writel(ehci, 0, &ehci->regs->intr_enable); (void)ehci_readl(ehci, &ehci->regs->intr_enable); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); spin_unlock_irqrestore (&ehci->lock, flags); // could save FLADJ in case of Vaux power loss // ... 
we'd only use it to handle clock skew return rc; } static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev) { return pdev->class == PCI_CLASS_SERIAL_USB_EHCI && pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == 0x1E26 || pdev->device == 0x8C2D || pdev->device == 0x8C26 || pdev->device == 0x9C26); } static void ehci_enable_xhci_companion(void) { struct pci_dev *companion = NULL; /* The xHCI and EHCI controllers are not on the same PCI slot */ for_each_pci_dev(companion) { if (!usb_is_intel_switchable_xhci(companion)) continue; usb_enable_xhci_ports(companion); return; } } static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); /* The BIOS on systems with the Intel Panther Point chipset may or may * not support xHCI natively. That means that during system resume, it * may switch the ports back to EHCI so that users can use their * keyboard to select a kernel from GRUB after resume from hibernate. * * The BIOS is supposed to remember whether the OS had xHCI ports * enabled before resume, and switch the ports back to xHCI when the * BIOS/OS semaphore is written, but we all know we can't trust BIOS * writers. * * Unconditionally switch the ports back to xHCI after a system resume. * We can't tell whether the EHCI or xHCI controller will be resumed * first, so we have to do the port switchover in both drivers. Writing * a '1' to the port switchover registers should have no effect if the * port was already switched over. */ if (usb_is_intel_switchable_ehci(pdev)) ehci_enable_xhci_companion(); // maybe restore FLADJ if (time_before(jiffies, ehci->next_statechange)) msleep(100); /* Mark hardware accessible again as we are out of D3 state by now */ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); /* If CF is still set and we aren't resuming from hibernation * then we maintained PCI Vaux power. * Just undo the effect of ehci_pci_suspend(). 
*/ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF && !hibernated) { int mask = INTR_MASK; ehci_prepare_ports_for_controller_resume(ehci); if (!hcd->self.root_hub->do_remote_wakeup) mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); ehci_readl(ehci, &ehci->regs->intr_enable); return 0; } usb_root_hub_lost_power(hcd->self.root_hub); /* Else reset, to cope with power loss or flush-to-storage * style "resume" having let BIOS kick in during reboot. */ (void) ehci_halt(ehci); (void) ehci_reset(ehci); (void) ehci_pci_reinit(ehci, pdev); /* emptying the schedule aborts any urbs */ spin_lock_irq(&ehci->lock); if (ehci->reclaim) end_unlink_async(ehci); ehci_work(ehci); spin_unlock_irq(&ehci->lock); ehci_writel(ehci, ehci->command, &ehci->regs->command); ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ /* here we "know" root ports should always stay powered */ ehci_port_power(ehci, 1); hcd->state = HC_STATE_SUSPENDED; return 0; } #endif static int ehci_update_device(struct usb_hcd *hcd, struct usb_device *udev) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); int rc = 0; if (!udev->parent) /* udev is root hub itself, impossible */ rc = -1; /* we only support lpm device connected to root hub yet */ if (ehci->has_lpm && !udev->parent->parent) { rc = ehci_lpm_set_da(ehci, udev->devnum, udev->portnum); if (!rc) rc = ehci_lpm_check(ehci, udev->portnum); } return rc; } static const struct hc_driver ehci_pci_hc_driver = { .description = hcd_name, .product_desc = "EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_MEMORY | HCD_USB2, /* * basic lifecycle operations */ .reset = ehci_pci_setup, .start = ehci_run, #ifdef CONFIG_PM .pci_suspend = ehci_pci_suspend, .pci_resume = ehci_pci_resume, #endif .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ 
.urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, /* * call back when device connected and addressed */ .update_device = ehci_update_device, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; /*-------------------------------------------------------------------------*/ /* PCI driver selection metadata; PCI hotplugging uses this */ static const struct pci_device_id pci_ids [] = { { /* handle any USB 2.0 EHCI controller */ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0), .driver_data = (unsigned long) &ehci_pci_hc_driver, }, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, pci_ids); /* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver ehci_pci_driver = { .name = (char *) hcd_name, .id_table = pci_ids, .probe = usb_hcd_pci_probe, .remove = usb_hcd_pci_remove, .shutdown = usb_hcd_pci_shutdown, #ifdef CONFIG_PM_SLEEP .driver = { .pm = &usb_hcd_pci_pm_ops }, #endif };
gpl-2.0
RomanHargrave/pf-kernel
drivers/input/keyboard/nomadik-ske-keypad.c
826
10853
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson * * License terms:GNU General Public License (GPL) version 2 * * Keypad controller driver for the SKE (Scroll Key Encoder) module used in * the Nomadik 8815 and Ux500 platforms. */ #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/platform_data/keypad-nomadik-ske.h> /* SKE_CR bits */ #define SKE_KPMLT (0x1 << 6) #define SKE_KPCN (0x7 << 3) #define SKE_KPASEN (0x1 << 2) #define SKE_KPASON (0x1 << 7) /* SKE_IMSC bits */ #define SKE_KPIMA (0x1 << 2) /* SKE_ICR bits */ #define SKE_KPICS (0x1 << 3) #define SKE_KPICA (0x1 << 2) /* SKE_RIS bits */ #define SKE_KPRISA (0x1 << 2) #define SKE_KEYPAD_ROW_SHIFT 3 #define SKE_KPD_NUM_ROWS 8 #define SKE_KPD_NUM_COLS 8 /* keypad auto scan registers */ #define SKE_ASR0 0x20 #define SKE_ASR1 0x24 #define SKE_ASR2 0x28 #define SKE_ASR3 0x2C #define SKE_NUM_ASRX_REGISTERS (4) #define KEY_PRESSED_DELAY 10 /** * struct ske_keypad - data structure used by keypad driver * @irq: irq no * @reg_base: ske regsiters base address * @input: pointer to input device object * @board: keypad platform device * @keymap: matrix scan code table for keycodes * @clk: clock structure pointer */ struct ske_keypad { int irq; void __iomem *reg_base; struct input_dev *input; const struct ske_keypad_platform_data *board; unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS]; struct clk *clk; struct clk *pclk; spinlock_t ske_keypad_lock; }; static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr, u8 mask, u8 data) { u32 ret; spin_lock(&keypad->ske_keypad_lock); ret = readl(keypad->reg_base + addr); ret &= ~mask; ret |= data; writel(ret, keypad->reg_base + addr); 
spin_unlock(&keypad->ske_keypad_lock); } /* * ske_keypad_chip_init: init keypad controller configuration * * Enable Multi key press detection, auto scan mode */ static int __init ske_keypad_chip_init(struct ske_keypad *keypad) { u32 value; int timeout = keypad->board->debounce_ms; /* check SKE_RIS to be 0 */ while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--) cpu_relax(); if (!timeout) return -EINVAL; /* * set debounce value * keypad dbounce is configured in DBCR[15:8] * dbounce value in steps of 32/32.768 ms */ spin_lock(&keypad->ske_keypad_lock); value = readl(keypad->reg_base + SKE_DBCR); value = value & 0xff; value |= ((keypad->board->debounce_ms * 32000)/32768) << 8; writel(value, keypad->reg_base + SKE_DBCR); spin_unlock(&keypad->ske_keypad_lock); /* enable multi key detection */ ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT); /* * set up the number of columns * KPCN[5:3] defines no. of keypad columns to be auto scanned */ value = (keypad->board->kcol - 1) << 3; ske_keypad_set_bits(keypad, SKE_CR, SKE_KPCN, value); /* clear keypad interrupt for auto(and pending SW) scans */ ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA | SKE_KPICS); /* un-mask keypad interrupts */ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); /* enable automatic scan */ ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPASEN); return 0; } static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col) { int row = 0, code, pos; struct input_dev *input = keypad->input; u32 ske_ris; int key_pressed; int num_of_rows; /* find out the row */ num_of_rows = hweight8(status); do { pos = __ffs(status); row = pos; status &= ~(1 << pos); code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT); ske_ris = readl(keypad->reg_base + SKE_RIS); key_pressed = ske_ris & SKE_KPRISA; input_event(input, EV_MSC, MSC_SCAN, code); input_report_key(input, keypad->keymap[code], key_pressed); input_sync(input); num_of_rows--; } while (num_of_rows); } static void 
ske_keypad_read_data(struct ske_keypad *keypad) { u8 status; int col = 0; int ske_asr, i; /* * Read the auto scan registers * * Each SKE_ASRx (x=0 to x=3) contains two row values. * lower byte contains row value for column 2*x, * upper byte contains row value for column 2*x + 1 */ for (i = 0; i < SKE_NUM_ASRX_REGISTERS; i++) { ske_asr = readl(keypad->reg_base + SKE_ASR0 + (4 * i)); if (!ske_asr) continue; /* now that ASRx is zero, find out the coloumn x and row y */ status = ske_asr & 0xff; if (status) { col = i * 2; ske_keypad_report(keypad, status, col); } status = (ske_asr & 0xff00) >> 8; if (status) { col = (i * 2) + 1; ske_keypad_report(keypad, status, col); } } } static irqreturn_t ske_keypad_irq(int irq, void *dev_id) { struct ske_keypad *keypad = dev_id; int timeout = keypad->board->debounce_ms; /* disable auto scan interrupt; mask the interrupt generated */ ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA); while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --timeout) cpu_relax(); /* SKEx registers are stable and can be read */ ske_keypad_read_data(keypad); /* wait until raw interrupt is clear */ while ((readl(keypad->reg_base + SKE_RIS)) && --timeout) msleep(KEY_PRESSED_DELAY); /* enable auto scan interrupts */ ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); return IRQ_HANDLED; } static int __init ske_keypad_probe(struct platform_device *pdev) { const struct ske_keypad_platform_data *plat = dev_get_platdata(&pdev->dev); struct ske_keypad *keypad; struct input_dev *input; struct resource *res; int irq; int error; if (!plat) { dev_err(&pdev->dev, "invalid keypad platform data\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "failed to get keypad irq\n"); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "missing platform resources\n"); return -EINVAL; } keypad = kzalloc(sizeof(struct 
ske_keypad), GFP_KERNEL); input = input_allocate_device(); if (!keypad || !input) { dev_err(&pdev->dev, "failed to allocate keypad memory\n"); error = -ENOMEM; goto err_free_mem; } keypad->irq = irq; keypad->board = plat; keypad->input = input; spin_lock_init(&keypad->ske_keypad_lock); if (!request_mem_region(res->start, resource_size(res), pdev->name)) { dev_err(&pdev->dev, "failed to request I/O memory\n"); error = -EBUSY; goto err_free_mem; } keypad->reg_base = ioremap(res->start, resource_size(res)); if (!keypad->reg_base) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); error = -ENXIO; goto err_free_mem_region; } keypad->pclk = clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(keypad->pclk)) { dev_err(&pdev->dev, "failed to get pclk\n"); error = PTR_ERR(keypad->pclk); goto err_iounmap; } keypad->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(keypad->clk)) { dev_err(&pdev->dev, "failed to get clk\n"); error = PTR_ERR(keypad->clk); goto err_pclk; } input->id.bustype = BUS_HOST; input->name = "ux500-ske-keypad"; input->dev.parent = &pdev->dev; error = matrix_keypad_build_keymap(plat->keymap_data, NULL, SKE_KPD_NUM_ROWS, SKE_KPD_NUM_COLS, keypad->keymap, input); if (error) { dev_err(&pdev->dev, "Failed to build keymap\n"); goto err_clk; } input_set_capability(input, EV_MSC, MSC_SCAN); if (!plat->no_autorepeat) __set_bit(EV_REP, input->evbit); error = clk_prepare_enable(keypad->pclk); if (error) { dev_err(&pdev->dev, "Failed to prepare/enable pclk\n"); goto err_clk; } error = clk_prepare_enable(keypad->clk); if (error) { dev_err(&pdev->dev, "Failed to prepare/enable clk\n"); goto err_pclk_disable; } /* go through board initialization helpers */ if (keypad->board->init) keypad->board->init(); error = ske_keypad_chip_init(keypad); if (error) { dev_err(&pdev->dev, "unable to init keypad hardware\n"); goto err_clk_disable; } error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq, IRQF_ONESHOT, "ske-keypad", keypad); if (error) { dev_err(&pdev->dev, "allocate irq 
%d failed\n", keypad->irq); goto err_clk_disable; } error = input_register_device(input); if (error) { dev_err(&pdev->dev, "unable to register input device: %d\n", error); goto err_free_irq; } if (plat->wakeup_enable) device_init_wakeup(&pdev->dev, true); platform_set_drvdata(pdev, keypad); return 0; err_free_irq: free_irq(keypad->irq, keypad); err_clk_disable: clk_disable_unprepare(keypad->clk); err_pclk_disable: clk_disable_unprepare(keypad->pclk); err_clk: clk_put(keypad->clk); err_pclk: clk_put(keypad->pclk); err_iounmap: iounmap(keypad->reg_base); err_free_mem_region: release_mem_region(res->start, resource_size(res)); err_free_mem: input_free_device(input); kfree(keypad); return error; } static int ske_keypad_remove(struct platform_device *pdev) { struct ske_keypad *keypad = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); free_irq(keypad->irq, keypad); input_unregister_device(keypad->input); clk_disable_unprepare(keypad->clk); clk_put(keypad->clk); if (keypad->board->exit) keypad->board->exit(); iounmap(keypad->reg_base); release_mem_region(res->start, resource_size(res)); kfree(keypad); return 0; } #ifdef CONFIG_PM_SLEEP static int ske_keypad_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ske_keypad *keypad = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); if (device_may_wakeup(dev)) enable_irq_wake(irq); else ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); return 0; } static int ske_keypad_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct ske_keypad *keypad = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); if (device_may_wakeup(dev)) disable_irq_wake(irq); else ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); return 0; } #endif static SIMPLE_DEV_PM_OPS(ske_keypad_dev_pm_ops, ske_keypad_suspend, ske_keypad_resume); static struct platform_driver ske_keypad_driver = { .driver 
= { .name = "nmk-ske-keypad", .pm = &ske_keypad_dev_pm_ops, }, .remove = ske_keypad_remove, }; module_platform_driver_probe(ske_keypad_driver, ske_keypad_probe); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>"); MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver"); MODULE_ALIAS("platform:nomadik-ske-keypad");
gpl-2.0
baberthal/linux
arch/arm/mach-s3c24xx/mach-h1940.c
826
19252
/* * Copyright (c) 2003-2005 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * http://www.handhelds.org/projects/h1940.html * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/memblock.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/device.h> #include <linux/serial_core.h> #include <linux/serial_s3c.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/pwm_backlight.h> #include <linux/i2c.h> #include <linux/leds.h> #include <linux/pda_power.h> #include <linux/s3c_adc_battery.h> #include <linux/delay.h> #include <video/platform_lcd.h> #include <linux/mmc/host.h> #include <linux/export.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <linux/platform_data/i2c-s3c2410.h> #include <linux/platform_data/mmc-s3cmci.h> #include <linux/platform_data/touchscreen-s3c2410.h> #include <linux/platform_data/usb-s3c2410_udc.h> #include <sound/uda1380.h> #include <mach/fb.h> #include <mach/hardware.h> #include <mach/regs-clock.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> #include <mach/gpio-samsung.h> #include <plat/cpu.h> #include <plat/devs.h> #include <plat/gpio-cfg.h> #include <plat/pm.h> #include <plat/samsung-time.h> #include "common.h" #include "h1940.h" #define H1940_LATCH ((void __force __iomem *)0xF8000000) #define H1940_PA_LATCH S3C2410_CS2 #define H1940_LATCH_BIT(x) (1 << ((x) + 16 - S3C_GPIO_END)) #define S3C24XX_PLL_MDIV_SHIFT (12) #define S3C24XX_PLL_PDIV_SHIFT (4) #define S3C24XX_PLL_SDIV_SHIFT (0) static struct map_desc h1940_iodesc[] __initdata = { [0] = { .virtual = (unsigned 
long)H1940_LATCH, .pfn = __phys_to_pfn(H1940_PA_LATCH), .length = SZ_16K, .type = MT_DEVICE }, }; #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE static struct s3c2410_uartcfg h1940_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, }, [1] = { .hwport = 1, .flags = 0, .ucon = 0x245, .ulcon = 0x03, .ufcon = 0x00, }, /* IR port */ [2] = { .hwport = 2, .flags = 0, .uart_flags = UPF_CONS_FLOW, .ucon = 0x3c5, .ulcon = 0x43, .ufcon = 0x51, } }; /* Board control latch control */ static unsigned int latch_state; static void h1940_latch_control(unsigned int clear, unsigned int set) { unsigned long flags; local_irq_save(flags); latch_state &= ~clear; latch_state |= set; __raw_writel(latch_state, H1940_LATCH); local_irq_restore(flags); } static inline int h1940_gpiolib_to_latch(int offset) { return 1 << (offset + 16); } static void h1940_gpiolib_latch_set(struct gpio_chip *chip, unsigned offset, int value) { int latch_bit = h1940_gpiolib_to_latch(offset); h1940_latch_control(value ? 0 : latch_bit, value ? 
latch_bit : 0); } static int h1940_gpiolib_latch_output(struct gpio_chip *chip, unsigned offset, int value) { h1940_gpiolib_latch_set(chip, offset, value); return 0; } static int h1940_gpiolib_latch_get(struct gpio_chip *chip, unsigned offset) { return (latch_state >> (offset + 16)) & 1; } static struct gpio_chip h1940_latch_gpiochip = { .base = H1940_LATCH_GPIO(0), .owner = THIS_MODULE, .label = "H1940_LATCH", .ngpio = 16, .direction_output = h1940_gpiolib_latch_output, .set = h1940_gpiolib_latch_set, .get = h1940_gpiolib_latch_get, }; static struct s3c2410_udc_mach_info h1940_udc_cfg __initdata = { .vbus_pin = S3C2410_GPG(5), .vbus_pin_inverted = 1, .pullup_pin = H1940_LATCH_USB_DP, }; static struct s3c2410_ts_mach_info h1940_ts_cfg __initdata = { .delay = 10000, .presc = 49, .oversampling_shift = 2, .cfg_gpio = s3c24xx_ts_cfg_gpio, }; /** * Set lcd on or off **/ static struct s3c2410fb_display h1940_lcd __initdata = { .lcdcon5= S3C2410_LCDCON5_FRM565 | \ S3C2410_LCDCON5_INVVLINE | \ S3C2410_LCDCON5_HWSWP, .type = S3C2410_LCDCON1_TFT, .width = 240, .height = 320, .pixclock = 260000, .xres = 240, .yres = 320, .bpp = 16, .left_margin = 8, .right_margin = 20, .hsync_len = 4, .upper_margin = 8, .lower_margin = 7, .vsync_len = 1, }; static struct s3c2410fb_mach_info h1940_fb_info __initdata = { .displays = &h1940_lcd, .num_displays = 1, .default_display = 0, .lpcsel = 0x02, .gpccon = 0xaa940659, .gpccon_mask = 0xffffc0f0, .gpcup = 0x0000ffff, .gpcup_mask = 0xffffffff, .gpdcon = 0xaa84aaa0, .gpdcon_mask = 0xffffffff, .gpdup = 0x0000faff, .gpdup_mask = 0xffffffff, }; static int power_supply_init(struct device *dev) { return gpio_request(S3C2410_GPF(2), "cable plugged"); } static int h1940_is_ac_online(void) { return !gpio_get_value(S3C2410_GPF(2)); } static void power_supply_exit(struct device *dev) { gpio_free(S3C2410_GPF(2)); } static char *h1940_supplicants[] = { "main-battery", "backup-battery", }; static struct pda_power_pdata power_supply_info = { .init = 
power_supply_init, .is_ac_online = h1940_is_ac_online, .exit = power_supply_exit, .supplied_to = h1940_supplicants, .num_supplicants = ARRAY_SIZE(h1940_supplicants), }; static struct resource power_supply_resources[] = { [0] = DEFINE_RES_NAMED(IRQ_EINT2, 1, "ac", IORESOURCE_IRQ \ | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE), }; static struct platform_device power_supply = { .name = "pda-power", .id = -1, .dev = { .platform_data = &power_supply_info, }, .resource = power_supply_resources, .num_resources = ARRAY_SIZE(power_supply_resources), }; static const struct s3c_adc_bat_thresh bat_lut_noac[] = { { .volt = 4070, .cur = 162, .level = 100}, { .volt = 4040, .cur = 165, .level = 95}, { .volt = 4016, .cur = 164, .level = 90}, { .volt = 3996, .cur = 166, .level = 85}, { .volt = 3971, .cur = 168, .level = 80}, { .volt = 3951, .cur = 168, .level = 75}, { .volt = 3931, .cur = 170, .level = 70}, { .volt = 3903, .cur = 172, .level = 65}, { .volt = 3886, .cur = 172, .level = 60}, { .volt = 3858, .cur = 176, .level = 55}, { .volt = 3842, .cur = 176, .level = 50}, { .volt = 3818, .cur = 176, .level = 45}, { .volt = 3789, .cur = 180, .level = 40}, { .volt = 3769, .cur = 180, .level = 35}, { .volt = 3749, .cur = 184, .level = 30}, { .volt = 3732, .cur = 184, .level = 25}, { .volt = 3716, .cur = 184, .level = 20}, { .volt = 3708, .cur = 184, .level = 15}, { .volt = 3716, .cur = 96, .level = 10}, { .volt = 3700, .cur = 96, .level = 5}, { .volt = 3684, .cur = 96, .level = 0}, }; static const struct s3c_adc_bat_thresh bat_lut_acin[] = { { .volt = 4130, .cur = 0, .level = 100}, { .volt = 3982, .cur = 0, .level = 50}, { .volt = 3854, .cur = 0, .level = 10}, { .volt = 3841, .cur = 0, .level = 0}, }; static int h1940_bat_init(void) { int ret; ret = gpio_request(H1940_LATCH_SM803_ENABLE, "h1940-charger-enable"); if (ret) return ret; gpio_direction_output(H1940_LATCH_SM803_ENABLE, 0); return 0; } static void h1940_bat_exit(void) { gpio_free(H1940_LATCH_SM803_ENABLE); } static void 
h1940_enable_charger(void) { gpio_set_value(H1940_LATCH_SM803_ENABLE, 1); } static void h1940_disable_charger(void) { gpio_set_value(H1940_LATCH_SM803_ENABLE, 0); } static struct s3c_adc_bat_pdata h1940_bat_cfg = { .init = h1940_bat_init, .exit = h1940_bat_exit, .enable_charger = h1940_enable_charger, .disable_charger = h1940_disable_charger, .gpio_charge_finished = S3C2410_GPF(3), .gpio_inverted = 1, .lut_noac = bat_lut_noac, .lut_noac_cnt = ARRAY_SIZE(bat_lut_noac), .lut_acin = bat_lut_acin, .lut_acin_cnt = ARRAY_SIZE(bat_lut_acin), .volt_channel = 0, .current_channel = 1, .volt_mult = 4056, .current_mult = 1893, .internal_impedance = 200, .backup_volt_channel = 3, /* TODO Check backup volt multiplier */ .backup_volt_mult = 4056, .backup_volt_min = 0, .backup_volt_max = 4149288 }; static struct platform_device h1940_battery = { .name = "s3c-adc-battery", .id = -1, .dev = { .parent = &s3c_device_adc.dev, .platform_data = &h1940_bat_cfg, }, }; static DEFINE_SPINLOCK(h1940_blink_spin); int h1940_led_blink_set(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off) { int blink_gpio, check_gpio1, check_gpio2; int gpio = desc ? 
desc_to_gpio(desc) : -EINVAL; switch (gpio) { case H1940_LATCH_LED_GREEN: blink_gpio = S3C2410_GPA(7); check_gpio1 = S3C2410_GPA(1); check_gpio2 = S3C2410_GPA(3); break; case H1940_LATCH_LED_RED: blink_gpio = S3C2410_GPA(1); check_gpio1 = S3C2410_GPA(7); check_gpio2 = S3C2410_GPA(3); break; default: blink_gpio = S3C2410_GPA(3); check_gpio1 = S3C2410_GPA(1); check_gpio2 = S3C2410_GPA(7); break; } if (delay_on && delay_off && !*delay_on && !*delay_off) *delay_on = *delay_off = 500; spin_lock(&h1940_blink_spin); switch (state) { case GPIO_LED_NO_BLINK_LOW: case GPIO_LED_NO_BLINK_HIGH: if (!gpio_get_value(check_gpio1) && !gpio_get_value(check_gpio2)) gpio_set_value(H1940_LATCH_LED_FLASH, 0); gpio_set_value(blink_gpio, 0); if (gpio_is_valid(gpio)) gpio_set_value(gpio, state); break; case GPIO_LED_BLINK: if (gpio_is_valid(gpio)) gpio_set_value(gpio, 0); gpio_set_value(H1940_LATCH_LED_FLASH, 1); gpio_set_value(blink_gpio, 1); break; } spin_unlock(&h1940_blink_spin); return 0; } EXPORT_SYMBOL(h1940_led_blink_set); static struct gpio_led h1940_leds_desc[] = { { .name = "Green", .default_trigger = "main-battery-full", .gpio = H1940_LATCH_LED_GREEN, .retain_state_suspended = 1, }, { .name = "Red", .default_trigger = "main-battery-charging-blink-full-solid", .gpio = H1940_LATCH_LED_RED, .retain_state_suspended = 1, }, }; static struct gpio_led_platform_data h1940_leds_pdata = { .num_leds = ARRAY_SIZE(h1940_leds_desc), .leds = h1940_leds_desc, .gpio_blink_set = h1940_led_blink_set, }; static struct platform_device h1940_device_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &h1940_leds_pdata, }, }; static struct platform_device h1940_device_bluetooth = { .name = "h1940-bt", .id = -1, }; static void h1940_set_mmc_power(unsigned char power_mode, unsigned short vdd) { switch (power_mode) { case MMC_POWER_OFF: gpio_set_value(H1940_LATCH_SD_POWER, 0); break; case MMC_POWER_UP: case MMC_POWER_ON: gpio_set_value(H1940_LATCH_SD_POWER, 1); break; default: break; } } 
/*
 * MMC/SD host platform data: card-detect on GPF5, write-protect on GPH8,
 * slot power switched through h1940_set_mmc_power() (drives the
 * H1940_LATCH_SD_POWER latch line). Only the 3.2-3.3V OCR range is
 * advertised.
 */
static struct s3c24xx_mci_pdata h1940_mmc_cfg __initdata = {
	.gpio_detect = S3C2410_GPF(5),
	.gpio_wprotect = S3C2410_GPH(8),
	.set_power = h1940_set_mmc_power,
	.ocr_avail = MMC_VDD_32_33,
};

/*
 * Backlight setup callback: claim GPB0, route it to the timer-0 PWM
 * output (TOUT0) and release the MAX1698 boost converter from shutdown.
 * Always returns 0; the gpio_request() result is not checked here.
 */
static int h1940_backlight_init(struct device *dev)
{
	gpio_request(S3C2410_GPB(0), "Backlight");

	gpio_direction_output(S3C2410_GPB(0), 0);
	s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
	s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
	gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 1);

	return 0;
}

/*
 * Brightness-change callback. Brightness 0 parks GPB0 high as a plain
 * output and shuts the MAX1698 down; any non-zero brightness restores
 * the PWM pin function and re-enables the converter. Returns the
 * brightness value unchanged.
 */
static int h1940_backlight_notify(struct device *dev, int brightness)
{
	if (!brightness) {
		gpio_direction_output(S3C2410_GPB(0), 1);
		gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
	} else {
		gpio_direction_output(S3C2410_GPB(0), 0);
		s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
		s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
		gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 1);
	}
	return brightness;
}

/*
 * Backlight teardown: park GPB0 high and put the MAX1698 into shutdown.
 * Note the GPIO claimed in h1940_backlight_init() is not released here.
 */
static void h1940_backlight_exit(struct device *dev)
{
	gpio_direction_output(S3C2410_GPB(0), 1);
	gpio_set_value(H1940_LATCH_MAX1698_nSHUTDOWN, 0);
}

/* pwm-backlight platform data; period chosen to match the original
 * firmware timer setting (see tcnt note below). */
static struct platform_pwm_backlight_data backlight_data = {
	.pwm_id = 0,
	.max_brightness = 100,
	.dft_brightness = 50,
	/* tcnt = 0x31 */
	.pwm_period_ns = 36296,
	.enable_gpio = -1,
	.init = h1940_backlight_init,
	.notify = h1940_backlight_notify,
	.exit = h1940_backlight_exit,
};

static struct platform_device h1940_backlight = {
	.name = "pwm-backlight",
	.dev = {
		.parent = &samsung_device_pwm.dev,
		.platform_data = &backlight_data,
	},
	.id = -1,
};

/*
 * LCD power sequencing. Power-off first drops GPC0, then busy-waits
 * (bounded to ~100 polls) for GPC6 to go low before dropping the latch
 * lines P2..P4, P1, P0 and GPC5 in order. Power-up raises the latch
 * lines, briefly floats GPC1/GPC4 for 10ms, then hands them back to
 * their special function before enabling GPC5/GPC0 and P3/P2/P4.
 * The exact ordering presumably matches the panel's power-up spec —
 * do not reorder.
 */
static void h1940_lcd_power_set(struct plat_lcd_data *pd,
					unsigned int power)
{
	int value, retries = 100;

	if (!power) {
		gpio_set_value(S3C2410_GPC(0), 0);
		/* wait for 3ac */
		do {
			value = gpio_get_value(S3C2410_GPC(6));
		} while (value && retries--);

		gpio_set_value(H1940_LATCH_LCD_P2, 0);
		gpio_set_value(H1940_LATCH_LCD_P3, 0);
		gpio_set_value(H1940_LATCH_LCD_P4, 0);

		gpio_direction_output(S3C2410_GPC(1), 0);
		gpio_direction_output(S3C2410_GPC(4), 0);

		gpio_set_value(H1940_LATCH_LCD_P1, 0);
		gpio_set_value(H1940_LATCH_LCD_P0, 0);

		gpio_set_value(S3C2410_GPC(5), 0);
	} else {
		gpio_set_value(H1940_LATCH_LCD_P0, 1);
		gpio_set_value(H1940_LATCH_LCD_P1, 1);

		gpio_direction_input(S3C2410_GPC(1));
		gpio_direction_input(S3C2410_GPC(4));
		mdelay(10);
		s3c_gpio_cfgpin(S3C2410_GPC(1), S3C_GPIO_SFN(2));
		s3c_gpio_cfgpin(S3C2410_GPC(4), S3C_GPIO_SFN(2));

		gpio_set_value(S3C2410_GPC(5), 1);
		gpio_set_value(S3C2410_GPC(0), 1);

		gpio_set_value(H1940_LATCH_LCD_P3, 1);
		gpio_set_value(H1940_LATCH_LCD_P2, 1);
		gpio_set_value(H1940_LATCH_LCD_P4, 1);
	}
}

static struct plat_lcd_data h1940_lcd_power_data = {
	.set_power = h1940_lcd_power_set,
};

static struct platform_device h1940_lcd_powerdev = {
	.name = "platform-lcd",
	.dev.parent = &s3c_device_lcd.dev,
	.dev.platform_data = &h1940_lcd_power_data,
};

/* UDA1380 audio codec: power via the CPLD latch, reset on GPA12. */
static struct uda1380_platform_data uda1380_info = {
	.gpio_power = H1940_LATCH_UDA_POWER,
	.gpio_reset = S3C2410_GPA(12),
	.dac_clk = UDA1380_DAC_CLK_SYSCLK,
};

/* I2C bus 0 devices, registered from h1940_init(). */
static struct i2c_board_info h1940_i2c_devices[] = {
	{
		I2C_BOARD_INFO("uda1380", 0x1a),
		.platform_data = &uda1380_info,
	},
};

/* Shorthand for one active-low gpio-keys entry:
 * p = gpio, k = keycode, n = description, w = wakeup-capable flag. */
#define DECLARE_BUTTON(p, k, n, w)	\
	{				\
		.gpio = p,		\
		.code = k,		\
		.desc = n,		\
		.wakeup = w,		\
		.active_low = 1,	\
	}

static struct gpio_keys_button h1940_buttons[] = {
	DECLARE_BUTTON(S3C2410_GPF(0), KEY_POWER, "Power", 1),
	DECLARE_BUTTON(S3C2410_GPF(6), KEY_ENTER, "Select", 1),
	DECLARE_BUTTON(S3C2410_GPF(7), KEY_RECORD, "Record", 0),
	DECLARE_BUTTON(S3C2410_GPG(0), KEY_F11, "Calendar", 0),
	DECLARE_BUTTON(S3C2410_GPG(2), KEY_F12, "Contacts", 0),
	DECLARE_BUTTON(S3C2410_GPG(3), KEY_MAIL, "Mail", 0),
	DECLARE_BUTTON(S3C2410_GPG(6), KEY_LEFT, "Left_arrow", 0),
	DECLARE_BUTTON(S3C2410_GPG(7), KEY_HOMEPAGE, "Home", 0),
	DECLARE_BUTTON(S3C2410_GPG(8), KEY_RIGHT, "Right_arrow", 0),
	DECLARE_BUTTON(S3C2410_GPG(9), KEY_UP, "Up_arrow", 0),
	DECLARE_BUTTON(S3C2410_GPG(10), KEY_DOWN, "Down_arrow", 0),
};

static struct gpio_keys_platform_data h1940_buttons_data = {
	.buttons	= h1940_buttons,
	.nbuttons	= ARRAY_SIZE(h1940_buttons),
};
static struct platform_device h1940_dev_buttons = {
	.name = "gpio-keys",
	.id = -1,
	.dev = {
		.platform_data = &h1940_buttons_data,
	}
};

/* All platform devices registered by h1940_init(); order is the
 * registration order passed to platform_add_devices(). */
static struct platform_device *h1940_devices[] __initdata = {
	&h1940_dev_buttons,
	&s3c_device_ohci,
	&s3c_device_lcd,
	&s3c_device_wdt,
	&s3c_device_i2c0,
	&s3c_device_iis,
	&s3c_device_usbgadget,
	&h1940_device_leds,
	&h1940_device_bluetooth,
	&s3c_device_sdi,
	&s3c_device_rtc,
	&samsung_device_pwm,
	&h1940_backlight,
	&h1940_lcd_powerdev,
	&s3c_device_adc,
	&s3c_device_ts,
	&power_supply,
	&h1940_battery,
};

/*
 * Early machine mapping/setup: static iodesc, UARTs, PWM timer source,
 * suspend-resume trampoline (copied to the reserved SDRAM address when
 * CONFIG_PM_H1940), and the CPLD latch exported as a gpiochip with all
 * latch outputs cleared.
 */
static void __init h1940_map_io(void)
{
	s3c24xx_init_io(h1940_iodesc, ARRAY_SIZE(h1940_iodesc));
	s3c24xx_init_uarts(h1940_uartcfgs, ARRAY_SIZE(h1940_uartcfgs));
	samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);

	/* setup PM */

#ifdef CONFIG_PM_H1940
	memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 1024);
#endif
	s3c_pm_init();

	/* Add latch gpio chip, set latch initial value */
	h1940_latch_control(0, 0);
	WARN_ON(gpiochip_add(&h1940_latch_gpiochip));
}

/* Clock/timer init with the board's 12MHz crystal. */
static void __init h1940_init_time(void)
{
	s3c2410_init_clocks(12000000);
	samsung_timer_init();
}

/* H1940 and RX3715 need to reserve this for suspend */
static void __init h1940_reserve(void)
{
	memblock_reserve(0x30003000, 0x1000);
	memblock_reserve(0x30081000, 0x1000);
}

/*
 * Main board init: install platform data, set up USB port routing and
 * the UPLL, claim and pre-drive the LCD/SD/LED GPIOs (all off), then
 * register the platform devices and the I2C codec.
 */
static void __init h1940_init(void)
{
	u32 tmp;

	s3c24xx_fb_set_platdata(&h1940_fb_info);
	s3c24xx_mci_set_platdata(&h1940_mmc_cfg);
	s3c24xx_udc_set_platdata(&h1940_udc_cfg);
	s3c24xx_ts_set_platdata(&h1940_ts_cfg);
	s3c_i2c0_set_platdata(NULL);

	/* Turn off suspend on both USB ports, and switch the
	 * selectable USB port to USB device mode. */

	s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
			      S3C2410_MISCCR_USBSUSPND0 |
			      S3C2410_MISCCR_USBSUSPND1, 0x0);

	/* Program the UPLL: MDIV=0x78, PDIV=0x02, SDIV=0x03. */
	tmp =   (0x78 << S3C24XX_PLL_MDIV_SHIFT)
	      | (0x02 << S3C24XX_PLL_PDIV_SHIFT)
	      | (0x03 << S3C24XX_PLL_SDIV_SHIFT);
	writel(tmp, S3C2410_UPLLCON);

	/* Claim every pin h1940_lcd_power_set() touches and drive them
	 * to the powered-off state (GPC6 is the status input it polls). */
	gpio_request(S3C2410_GPC(0), "LCD power");
	gpio_request(S3C2410_GPC(1), "LCD power");
	gpio_request(S3C2410_GPC(4), "LCD power");
	gpio_request(S3C2410_GPC(5), "LCD power");
	gpio_request(S3C2410_GPC(6), "LCD power");
	gpio_request(H1940_LATCH_LCD_P0, "LCD power");
	gpio_request(H1940_LATCH_LCD_P1, "LCD power");
	gpio_request(H1940_LATCH_LCD_P2, "LCD power");
	gpio_request(H1940_LATCH_LCD_P3, "LCD power");
	gpio_request(H1940_LATCH_LCD_P4, "LCD power");
	gpio_request(H1940_LATCH_MAX1698_nSHUTDOWN, "LCD power");
	gpio_direction_output(S3C2410_GPC(0), 0);
	gpio_direction_output(S3C2410_GPC(1), 0);
	gpio_direction_output(S3C2410_GPC(4), 0);
	gpio_direction_output(S3C2410_GPC(5), 0);
	gpio_direction_input(S3C2410_GPC(6));
	gpio_direction_output(H1940_LATCH_LCD_P0, 0);
	gpio_direction_output(H1940_LATCH_LCD_P1, 0);
	gpio_direction_output(H1940_LATCH_LCD_P2, 0);
	gpio_direction_output(H1940_LATCH_LCD_P3, 0);
	gpio_direction_output(H1940_LATCH_LCD_P4, 0);
	gpio_direction_output(H1940_LATCH_MAX1698_nSHUTDOWN, 0);

	gpio_request(H1940_LATCH_SD_POWER, "SD power");
	gpio_direction_output(H1940_LATCH_SD_POWER, 0);

	platform_add_devices(h1940_devices, ARRAY_SIZE(h1940_devices));

	/* LED blink control pins used by h1940_led_blink_set(). */
	gpio_request(S3C2410_GPA(1), "Red LED blink");
	gpio_request(S3C2410_GPA(3), "Blue LED blink");
	gpio_request(S3C2410_GPA(7), "Green LED blink");
	gpio_request(H1940_LATCH_LED_FLASH, "LED blink");
	gpio_direction_output(S3C2410_GPA(1), 0);
	gpio_direction_output(S3C2410_GPA(3), 0);
	gpio_direction_output(S3C2410_GPA(7), 0);
	gpio_direction_output(H1940_LATCH_LED_FLASH, 0);

	i2c_register_board_info(0, h1940_i2c_devices,
		ARRAY_SIZE(h1940_i2c_devices));
}

MACHINE_START(H1940, "IPAQ-H1940")
	/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
	.atag_offset = 0x100,
	.map_io = h1940_map_io,
	.reserve = h1940_reserve,
	.init_irq = s3c2410_init_irq,
	.init_machine = h1940_init,
	.init_time = h1940_init_time,
MACHINE_END
gpl-2.0
MassStash/htc_m9_kernel_sense_5.0.2
arch/powerpc/platforms/pseries/pci.c
2106
4212
/*
 * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * pSeries specific routines for PCI.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/eeh.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/ppc-pci.h>

/* Disabled: would prepend the IBM location code to device names. */
#if 0
void pcibios_name_device(struct pci_dev *dev)
{
	struct device_node *dn;

	/*
	 * Add IBM loc code (slot) as a prefix to the device names for service
	 */
	dn = pci_device_to_OF_node(dev);
	if (dn) {
		const char *loc_code = of_get_property(dn, "ibm,loc-code",
				NULL);
		if (loc_code) {
			int loc_len = strlen(loc_code);
			if (loc_len < sizeof(dev->dev.name)) {
				memmove(dev->dev.name+loc_len+1, dev->dev.name,
					sizeof(dev->dev.name)-loc_len-1);
				memcpy(dev->dev.name, loc_code, loc_len);
				dev->dev.name[loc_len] = ' ';
				dev->dev.name[sizeof(dev->dev.name)-1] = '\0';
			}
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
#endif

/*
 * Reserve the legacy ISA I/O port ranges (PICs, DMA controllers, timer)
 * so nothing else claims them. No-op when there is no ISA I/O space.
 */
static void __init pSeries_request_regions(void)
{
	if (!isa_io_base)
		return;

	request_region(0x20,0x20,"pic1");
	request_region(0xa0,0x20,"pic2");
	request_region(0x00,0x20,"dma1");
	request_region(0x40,0x20,"timer");
	request_region(0x80,0x10,"dma page reg");
	request_region(0xc0,0x20,"dma2");
}

/* Late PCI fixup: legacy region reservation, then build the EEH
 * address cache. */
void __init pSeries_final_fixup(void)
{
	pSeries_request_regions();
	eeh_addr_cache_build();
}

/*
 * Assume the winbond 82c105 is the IDE controller on a
 * p610/p615/p630. We should probably be more careful in case
 * someone tries to plug in a similar adapter.
 *
 * Sets the LEGIRQ bit in config register 0x40 so the chip interrupts
 * through the interrupt controller rather than ISA IRQ lines, strips
 * IORESOURCE_IO from the second function (devfn 0x81 on bus 0), and
 * clears resources that start at 0 but have a non-zero end.
 */
static void fixup_winbond_82c105(struct pci_dev* dev)
{
	int i;
	unsigned int reg;

	if (!machine_is(pseries))
		return;

	printk("Using INTC for W82c105 IDE controller.\n");
	pci_read_config_dword(dev, 0x40, &reg);
	/* Enable LEGIRQ to use INTC instead of ISA interrupts */
	pci_write_config_dword(dev, 0x40, reg | (1<<11));

	for (i = 0; i < DEVICE_COUNT_RESOURCE; ++i) {
		/* zap the 2nd function of the winbond chip */
		if (dev->resource[i].flags & IORESOURCE_IO
		    && dev->bus->number == 0 && dev->devfn == 0x81)
			dev->resource[i].flags &= ~IORESOURCE_IO;
		if (dev->resource[i].start == 0 && dev->resource[i].end) {
			dev->resource[i].flags = 0;
			dev->resource[i].end = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
			 fixup_winbond_82c105);

/*
 * Fill in max_bus_speed/cur_bus_speed for a PHB root bus from the
 * firmware "ibm,pcie-link-speed-stats" property, searched upward from
 * the PHB's OF node (of_get_next_parent() drops the child reference as
 * it walks, and the trailing of_node_put() releases the node the walk
 * stopped on). Values 0x01/0x02 map to 2.5/5.0 GT/s; anything else is
 * reported as unknown — 8 GT/s (0x04) is not handled here.
 * Always returns 0, including when the property is absent.
 */
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct device_node *dn, *pdn;
	struct pci_bus *bus;
	const uint32_t *pcie_link_speed_stats;

	bus = bridge->bus;

	dn = pcibios_get_phb_of_node(bus);
	if (!dn)
		return 0;

	for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
		pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
			"ibm,pcie-link-speed-stats", NULL);
		if (pcie_link_speed_stats)
			break;
	}

	of_node_put(pdn);

	if (!pcie_link_speed_stats) {
		pr_err("no ibm,pcie-link-speed-stats property\n");
		return 0;
	}

	/* First cell: maximum supported link speed. */
	switch (pcie_link_speed_stats[0]) {
	case 0x01:
		bus->max_bus_speed = PCIE_SPEED_2_5GT;
		break;
	case 0x02:
		bus->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		bus->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}

	/* Second cell: currently negotiated link speed. */
	switch (pcie_link_speed_stats[1]) {
	case 0x01:
		bus->cur_bus_speed = PCIE_SPEED_2_5GT;
		break;
	case 0x02:
		bus->cur_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}

	return 0;
}
gpl-2.0
TimesysGit/advantech-linux
drivers/tty/synclink.c
2106
235415
/* * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $ * * Device driver for Microgate SyncLink ISA and PCI * high speed multiprotocol serial adapters. * * written by Paul Fulghum for Microgate Corporation * paulkf@microgate.com * * Microgate and SyncLink are trademarks of Microgate Corporation * * Derived from serial.c written by Theodore Ts'o and Linus Torvalds * * Original release 01/11/99 * * This code is released under the GNU General Public License (GPL) * * This driver is primarily intended for use in synchronous * HDLC mode. Asynchronous mode is also provided. * * When operating in synchronous mode, each call to mgsl_write() * contains exactly one complete HDLC frame. Calling mgsl_put_char * will start assembling an HDLC frame that will not be sent until * mgsl_flush_chars or mgsl_write is called. * * Synchronous receive data is reported as complete frames. To accomplish * this, the TTY flip buffer is bypassed (too small to hold largest * frame and may fragment frames) and the line discipline * receive entry point is called directly. * * This driver has been tested with a slightly modified ppp.c driver * for synchronous PPP. * * 2000/02/16 * Added interface for syncppp.c driver (an alternate synchronous PPP * implementation that also supports Cisco HDLC). Each device instance * registers as a tty device AND a network device (if dosyncppp option * is set for the device). The functionality is determined by which * device interface is opened. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. */ #if defined(__i386__) # define BREAKPOINT() asm(" int $3"); #else # define BREAKPOINT() { } #endif #define MAX_ISA_DEVICES 10 #define MAX_PCI_DEVICES 10 #define MAX_TOTAL_DEVICES 20 #include <linux/module.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/ioctl.h> #include <linux/synclink.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/dma.h> #include <linux/bitops.h> #include <asm/types.h> #include <linux/termios.h> #include <linux/workqueue.h> #include <linux/hdlc.h> #include <linux/dma-mapping.h> #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE)) #define SYNCLINK_GENERIC_HDLC 1 #else #define SYNCLINK_GENERIC_HDLC 0 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 #define PUT_USER(error,value,addr) error = put_user(value,addr) #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? 
-EFAULT : 0 #include <asm/uaccess.h> #define RCLRVALUE 0xffff static MGSL_PARAMS default_params = { MGSL_MODE_HDLC, /* unsigned long mode */ 0, /* unsigned char loopback; */ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 0, /* unsigned long clock_speed; */ 0xff, /* unsigned char addr_filter; */ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 9600, /* unsigned long data_rate; */ 8, /* unsigned char data_bits; */ 1, /* unsigned char stop_bits; */ ASYNC_PARITY_NONE /* unsigned char parity; */ }; #define SHARED_MEM_ADDRESS_SIZE 0x40000 #define BUFFERLISTSIZE 4096 #define DMABUFFERSIZE 4096 #define MAXRXFRAMES 7 typedef struct _DMABUFFERENTRY { u32 phys_addr; /* 32-bit flat physical address of data buffer */ volatile u16 count; /* buffer size/data count */ volatile u16 status; /* Control/status field */ volatile u16 rcc; /* character count field */ u16 reserved; /* padding required by 16C32 */ u32 link; /* 32-bit flat link to next buffer entry */ char *virt_addr; /* virtual address of data buffer */ u32 phys_entry; /* physical address of this buffer entry */ dma_addr_t dma_addr; } DMABUFFERENTRY, *DMAPBUFFERENTRY; /* The queue of BH actions to be performed */ #define BH_RECEIVE 1 #define BH_TRANSMIT 2 #define BH_STATUS 4 #define IO_PIN_SHUTDOWN_LIMIT 100 struct _input_signal_events { int ri_up; int ri_down; int dsr_up; int dsr_down; int dcd_up; int dcd_down; int cts_up; int cts_down; }; /* transmit holding buffer definitions*/ #define MAX_TX_HOLDING_BUFFERS 5 struct tx_holding_buffer { int buffer_size; unsigned char * buffer; }; /* * Device instance data structure */ struct mgsl_struct { int magic; struct tty_port port; int line; int hw_version; struct mgsl_icount icount; int timeout; int x_char; /* xon/xoff character */ u16 read_status_mask; u16 ignore_status_mask; unsigned char 
*xmit_buf; int xmit_head; int xmit_tail; int xmit_cnt; wait_queue_head_t status_event_wait_q; wait_queue_head_t event_wait_q; struct timer_list tx_timer; /* HDLC transmit timeout timer */ struct mgsl_struct *next_device; /* device list link */ spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */ struct work_struct task; /* task structure for scheduling bh */ u32 EventMask; /* event trigger mask */ u32 RecordedEvents; /* pending events */ u32 max_frame_size; /* as set by device config */ u32 pending_bh; bool bh_running; /* Protection from multiple */ int isr_overflow; bool bh_requested; int dcd_chkcount; /* check counts to prevent */ int cts_chkcount; /* too many IRQs if a signal */ int dsr_chkcount; /* is floating */ int ri_chkcount; char *buffer_list; /* virtual address of Rx & Tx buffer lists */ u32 buffer_list_phys; dma_addr_t buffer_list_dma_addr; unsigned int rx_buffer_count; /* count of total allocated Rx buffers */ DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */ unsigned int current_rx_buffer; int num_tx_dma_buffers; /* number of tx dma frames required */ int tx_dma_buffers_used; unsigned int tx_buffer_count; /* count of total allocated Tx buffers */ DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */ int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */ int current_tx_buffer; /* next tx dma buffer to be loaded */ unsigned char *intermediate_rxbuffer; int num_tx_holding_buffers; /* number of tx holding buffer allocated */ int get_tx_holding_index; /* next tx holding buffer for adapter to load */ int put_tx_holding_index; /* next tx holding buffer to store user request */ int tx_holding_count; /* number of tx holding buffers waiting */ struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS]; bool rx_enabled; bool rx_overflow; bool rx_rcc_underrun; bool tx_enabled; bool tx_active; u32 idle_mode; u16 cmr_value; u16 tcsr_value; char device_name[25]; /* device instance name */ 
unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ unsigned char bus; /* expansion bus number (zero based) */ unsigned char function; /* PCI device number */ unsigned int io_base; /* base I/O address of adapter */ unsigned int io_addr_size; /* size of the I/O address range */ bool io_addr_requested; /* true if I/O address requested */ unsigned int irq_level; /* interrupt level */ unsigned long irq_flags; bool irq_requested; /* true if IRQ requested */ unsigned int dma_level; /* DMA channel */ bool dma_requested; /* true if dma channel requested */ u16 mbre_bit; u16 loopback_bits; u16 usc_idle_mode; MGSL_PARAMS params; /* communications parameters */ unsigned char serial_signals; /* current serial signal states */ bool irq_occurred; /* for diagnostics use */ unsigned int init_error; /* Initialization startup error (DIAGS) */ int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */ u32 last_mem_alloc; unsigned char* memory_base; /* shared memory address (PCI only) */ u32 phys_memory_base; bool shared_mem_requested; unsigned char* lcr_base; /* local config registers (PCI only) */ u32 phys_lcr_base; u32 lcr_offset; bool lcr_mem_requested; u32 misc_ctrl_value; char *flag_buf; bool drop_rts_on_tx_done; bool loopmode_insert_requested; bool loopmode_send_done_requested; struct _input_signal_events input_signal_events; /* generic HDLC device parts */ int netcount; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC struct net_device *netdev; #endif }; #define MGSL_MAGIC 0x5401 /* * The size of the serial xmit buffer is 1 page, or 4096 bytes */ #ifndef SERIAL_XMIT_SIZE #define SERIAL_XMIT_SIZE 4096 #endif /* * These macros define the offsets used in calculating the * I/O address of the specified USC registers. 
*/ #define DCPIN 2 /* Bit 1 of I/O address */ #define SDPIN 4 /* Bit 2 of I/O address */ #define DCAR 0 /* DMA command/address register */ #define CCAR SDPIN /* channel command/address register */ #define DATAREG DCPIN + SDPIN /* serial data register */ #define MSBONLY 0x41 #define LSBONLY 0x40 /* * These macros define the register address (ordinal number) * used for writing address/value pairs to the USC. */ #define CMR 0x02 /* Channel mode Register */ #define CCSR 0x04 /* Channel Command/status Register */ #define CCR 0x06 /* Channel Control Register */ #define PSR 0x08 /* Port status Register */ #define PCR 0x0a /* Port Control Register */ #define TMDR 0x0c /* Test mode Data Register */ #define TMCR 0x0e /* Test mode Control Register */ #define CMCR 0x10 /* Clock mode Control Register */ #define HCR 0x12 /* Hardware Configuration Register */ #define IVR 0x14 /* Interrupt Vector Register */ #define IOCR 0x16 /* Input/Output Control Register */ #define ICR 0x18 /* Interrupt Control Register */ #define DCCR 0x1a /* Daisy Chain Control Register */ #define MISR 0x1c /* Misc Interrupt status Register */ #define SICR 0x1e /* status Interrupt Control Register */ #define RDR 0x20 /* Receive Data Register */ #define RMR 0x22 /* Receive mode Register */ #define RCSR 0x24 /* Receive Command/status Register */ #define RICR 0x26 /* Receive Interrupt Control Register */ #define RSR 0x28 /* Receive Sync Register */ #define RCLR 0x2a /* Receive count Limit Register */ #define RCCR 0x2c /* Receive Character count Register */ #define TC0R 0x2e /* Time Constant 0 Register */ #define TDR 0x30 /* Transmit Data Register */ #define TMR 0x32 /* Transmit mode Register */ #define TCSR 0x34 /* Transmit Command/status Register */ #define TICR 0x36 /* Transmit Interrupt Control Register */ #define TSR 0x38 /* Transmit Sync Register */ #define TCLR 0x3a /* Transmit count Limit Register */ #define TCCR 0x3c /* Transmit Character count Register */ #define TC1R 0x3e /* Time Constant 1 Register 
*/ /* * MACRO DEFINITIONS FOR DMA REGISTERS */ #define DCR 0x06 /* DMA Control Register (shared) */ #define DACR 0x08 /* DMA Array count Register (shared) */ #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */ #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */ #define DICR 0x18 /* DMA Interrupt Control Register (shared) */ #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */ #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */ #define TDMR 0x02 /* Transmit DMA mode Register */ #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */ #define TBCR 0x2a /* Transmit Byte count Register */ #define TARL 0x2c /* Transmit Address Register (low) */ #define TARU 0x2e /* Transmit Address Register (high) */ #define NTBCR 0x3a /* Next Transmit Byte count Register */ #define NTARL 0x3c /* Next Transmit Address Register (low) */ #define NTARU 0x3e /* Next Transmit Address Register (high) */ #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */ #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */ #define RBCR 0xaa /* Receive Byte count Register */ #define RARL 0xac /* Receive Address Register (low) */ #define RARU 0xae /* Receive Address Register (high) */ #define NRBCR 0xba /* Next Receive Byte count Register */ #define NRARL 0xbc /* Next Receive Address Register (low) */ #define NRARU 0xbe /* Next Receive Address Register (high) */ /* * MACRO DEFINITIONS FOR MODEM STATUS BITS */ #define MODEMSTATUS_DTR 0x80 #define MODEMSTATUS_DSR 0x40 #define MODEMSTATUS_RTS 0x20 #define MODEMSTATUS_CTS 0x10 #define MODEMSTATUS_RI 0x04 #define MODEMSTATUS_DCD 0x01 /* * Channel Command/Address Register (CCAR) Command Codes */ #define RTCmd_Null 0x0000 #define RTCmd_ResetHighestIus 0x1000 #define RTCmd_TriggerChannelLoadDma 0x2000 #define RTCmd_TriggerRxDma 0x2800 #define RTCmd_TriggerTxDma 0x3000 #define RTCmd_TriggerRxAndTxDma 0x3800 #define RTCmd_PurgeRxFifo 0x4800 #define RTCmd_PurgeTxFifo 0x5000 #define RTCmd_PurgeRxAndTxFifo 0x5800 
#define RTCmd_LoadRcc 0x6800 #define RTCmd_LoadTcc 0x7000 #define RTCmd_LoadRccAndTcc 0x7800 #define RTCmd_LoadTC0 0x8800 #define RTCmd_LoadTC1 0x9000 #define RTCmd_LoadTC0AndTC1 0x9800 #define RTCmd_SerialDataLSBFirst 0xa000 #define RTCmd_SerialDataMSBFirst 0xa800 #define RTCmd_SelectBigEndian 0xb000 #define RTCmd_SelectLittleEndian 0xb800 /* * DMA Command/Address Register (DCAR) Command Codes */ #define DmaCmd_Null 0x0000 #define DmaCmd_ResetTxChannel 0x1000 #define DmaCmd_ResetRxChannel 0x1200 #define DmaCmd_StartTxChannel 0x2000 #define DmaCmd_StartRxChannel 0x2200 #define DmaCmd_ContinueTxChannel 0x3000 #define DmaCmd_ContinueRxChannel 0x3200 #define DmaCmd_PauseTxChannel 0x4000 #define DmaCmd_PauseRxChannel 0x4200 #define DmaCmd_AbortTxChannel 0x5000 #define DmaCmd_AbortRxChannel 0x5200 #define DmaCmd_InitTxChannel 0x7000 #define DmaCmd_InitRxChannel 0x7200 #define DmaCmd_ResetHighestDmaIus 0x8000 #define DmaCmd_ResetAllChannels 0x9000 #define DmaCmd_StartAllChannels 0xa000 #define DmaCmd_ContinueAllChannels 0xb000 #define DmaCmd_PauseAllChannels 0xc000 #define DmaCmd_AbortAllChannels 0xd000 #define DmaCmd_InitAllChannels 0xf000 #define TCmd_Null 0x0000 #define TCmd_ClearTxCRC 0x2000 #define TCmd_SelectTicrTtsaData 0x4000 #define TCmd_SelectTicrTxFifostatus 0x5000 #define TCmd_SelectTicrIntLevel 0x6000 #define TCmd_SelectTicrdma_level 0x7000 #define TCmd_SendFrame 0x8000 #define TCmd_SendAbort 0x9000 #define TCmd_EnableDleInsertion 0xc000 #define TCmd_DisableDleInsertion 0xd000 #define TCmd_ClearEofEom 0xe000 #define TCmd_SetEofEom 0xf000 #define RCmd_Null 0x0000 #define RCmd_ClearRxCRC 0x2000 #define RCmd_EnterHuntmode 0x3000 #define RCmd_SelectRicrRtsaData 0x4000 #define RCmd_SelectRicrRxFifostatus 0x5000 #define RCmd_SelectRicrIntLevel 0x6000 #define RCmd_SelectRicrdma_level 0x7000 /* * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR) */ #define RECEIVE_STATUS BIT5 #define RECEIVE_DATA BIT4 #define TRANSMIT_STATUS BIT3 #define 
TRANSMIT_DATA BIT2 #define IO_PIN BIT1 #define MISC BIT0 /* * Receive status Bits in Receive Command/status Register RCSR */ #define RXSTATUS_SHORT_FRAME BIT8 #define RXSTATUS_CODE_VIOLATION BIT8 #define RXSTATUS_EXITED_HUNT BIT7 #define RXSTATUS_IDLE_RECEIVED BIT6 #define RXSTATUS_BREAK_RECEIVED BIT5 #define RXSTATUS_ABORT_RECEIVED BIT5 #define RXSTATUS_RXBOUND BIT4 #define RXSTATUS_CRC_ERROR BIT3 #define RXSTATUS_FRAMING_ERROR BIT3 #define RXSTATUS_ABORT BIT2 #define RXSTATUS_PARITY_ERROR BIT2 #define RXSTATUS_OVERRUN BIT1 #define RXSTATUS_DATA_AVAILABLE BIT0 #define RXSTATUS_ALL 0x01f6 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) ) /* * Values for setting transmit idle mode in * Transmit Control/status Register (TCSR) */ #define IDLEMODE_FLAGS 0x0000 #define IDLEMODE_ALT_ONE_ZERO 0x0100 #define IDLEMODE_ZERO 0x0200 #define IDLEMODE_ONE 0x0300 #define IDLEMODE_ALT_MARK_SPACE 0x0500 #define IDLEMODE_SPACE 0x0600 #define IDLEMODE_MARK 0x0700 #define IDLEMODE_MASK 0x0700 /* * IUSC revision identifiers */ #define IUSC_SL1660 0x4d44 #define IUSC_PRE_SL1660 0x4553 /* * Transmit status Bits in Transmit Command/status Register (TCSR) */ #define TCSR_PRESERVE 0x0F00 #define TCSR_UNDERWAIT BIT11 #define TXSTATUS_PREAMBLE_SENT BIT7 #define TXSTATUS_IDLE_SENT BIT6 #define TXSTATUS_ABORT_SENT BIT5 #define TXSTATUS_EOF_SENT BIT4 #define TXSTATUS_EOM_SENT BIT4 #define TXSTATUS_CRC_SENT BIT3 #define TXSTATUS_ALL_SENT BIT2 #define TXSTATUS_UNDERRUN BIT1 #define TXSTATUS_FIFO_EMPTY BIT0 #define TXSTATUS_ALL 0x00fa #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) ) #define MISCSTATUS_RXC_LATCHED BIT15 #define MISCSTATUS_RXC BIT14 #define MISCSTATUS_TXC_LATCHED BIT13 #define MISCSTATUS_TXC BIT12 #define MISCSTATUS_RI_LATCHED BIT11 #define MISCSTATUS_RI BIT10 #define MISCSTATUS_DSR_LATCHED BIT9 #define MISCSTATUS_DSR BIT8 #define MISCSTATUS_DCD_LATCHED BIT7 #define MISCSTATUS_DCD BIT6 #define 
MISCSTATUS_CTS_LATCHED BIT5 #define MISCSTATUS_CTS BIT4 #define MISCSTATUS_RCC_UNDERRUN BIT3 #define MISCSTATUS_DPLL_NO_SYNC BIT2 #define MISCSTATUS_BRG1_ZERO BIT1 #define MISCSTATUS_BRG0_ZERO BIT0 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0)) #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f)) #define SICR_RXC_ACTIVE BIT15 #define SICR_RXC_INACTIVE BIT14 #define SICR_RXC (BIT15+BIT14) #define SICR_TXC_ACTIVE BIT13 #define SICR_TXC_INACTIVE BIT12 #define SICR_TXC (BIT13+BIT12) #define SICR_RI_ACTIVE BIT11 #define SICR_RI_INACTIVE BIT10 #define SICR_RI (BIT11+BIT10) #define SICR_DSR_ACTIVE BIT9 #define SICR_DSR_INACTIVE BIT8 #define SICR_DSR (BIT9+BIT8) #define SICR_DCD_ACTIVE BIT7 #define SICR_DCD_INACTIVE BIT6 #define SICR_DCD (BIT7+BIT6) #define SICR_CTS_ACTIVE BIT5 #define SICR_CTS_INACTIVE BIT4 #define SICR_CTS (BIT5+BIT4) #define SICR_RCC_UNDERFLOW BIT3 #define SICR_DPLL_NO_SYNC BIT2 #define SICR_BRG1_ZERO BIT1 #define SICR_BRG0_ZERO BIT0 void usc_DisableMasterIrqBit( struct mgsl_struct *info ); void usc_EnableMasterIrqBit( struct mgsl_struct *info ); void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask ); void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask ); void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask ); #define usc_EnableInterrupts( a, b ) \ usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) ) #define usc_DisableInterrupts( a, b ) \ usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) ) #define usc_EnableMasterIrqBit(a) \ usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) ) #define usc_DisableMasterIrqBit(a) \ usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) ) #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) ) /* * Transmit status Bits in Transmit Control status Register (TCSR) * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ #define 
TXSTATUS_PREAMBLE_SENT BIT7 #define TXSTATUS_IDLE_SENT BIT6 #define TXSTATUS_ABORT_SENT BIT5 #define TXSTATUS_EOF BIT4 #define TXSTATUS_CRC_SENT BIT3 #define TXSTATUS_ALL_SENT BIT2 #define TXSTATUS_UNDERRUN BIT1 #define TXSTATUS_FIFO_EMPTY BIT0 #define DICR_MASTER BIT15 #define DICR_TRANSMIT BIT0 #define DICR_RECEIVE BIT1 #define usc_EnableDmaInterrupts(a,b) \ usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) ) #define usc_DisableDmaInterrupts(a,b) \ usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) ) #define usc_EnableStatusIrqs(a,b) \ usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) ) #define usc_DisablestatusIrqs(a,b) \ usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) ) /* Transmit status Bits in Transmit Control status Register (TCSR) */ /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ #define DISABLE_UNCONDITIONAL 0 #define DISABLE_END_OF_FRAME 1 #define ENABLE_UNCONDITIONAL 2 #define ENABLE_AUTO_CTS 3 #define ENABLE_AUTO_DCD 3 #define usc_EnableTransmitter(a,b) \ usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) ) #define usc_EnableReceiver(a,b) \ usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) ) static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port ); static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value ); static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ); static u16 usc_InReg( struct mgsl_struct *info, u16 Port ); static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value ); static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ); void usc_RCmd( struct mgsl_struct *info, u16 Cmd ); void usc_TCmd( struct mgsl_struct *info, u16 Cmd ); #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b))) #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b)) #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1)) static void usc_process_rxoverrun_sync( struct mgsl_struct *info ); 
static void usc_start_receiver( struct mgsl_struct *info );
static void usc_stop_receiver( struct mgsl_struct *info );
static void usc_start_transmitter( struct mgsl_struct *info );
static void usc_stop_transmitter( struct mgsl_struct *info );
static void usc_set_txidle( struct mgsl_struct *info );
static void usc_load_txfifo( struct mgsl_struct *info );

static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_enable_loopback( struct mgsl_struct *info, int enable );

static void usc_get_serial_signals( struct mgsl_struct *info );
static void usc_set_serial_signals( struct mgsl_struct *info );

static void usc_reset( struct mgsl_struct *info );

static void usc_set_sync_mode( struct mgsl_struct *info );
static void usc_set_sdlc_mode( struct mgsl_struct *info );
static void usc_set_async_mode( struct mgsl_struct *info );
static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );

static void usc_loopback_frame( struct mgsl_struct *info );

static void mgsl_tx_timeout(unsigned long context);

/* SDLC loop mode (ring topology) support */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
static void usc_loopmode_insert_request( struct mgsl_struct * info );
static int usc_loopmode_active( struct mgsl_struct * info);
static void usc_loopmode_send_done( struct mgsl_struct * info );

static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);

#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
static int  hdlcdev_init(struct mgsl_struct *info);
static void hdlcdev_exit(struct mgsl_struct *info);
#endif

/*
 * Defines a BUS descriptor value for the PCI adapter
 * local bus address ranges.
 */
#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly)  << 28) + \
((RdDly)  << 26) + \
((Nwdd)   << 20) + \
((Nwad)   << 15) + \
((Nxda)   << 13) + \
((Nrdd)   << 11) + \
((Nrad)   <<  6) )

static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);

/*
 * Adapter diagnostic routines
 */
static bool mgsl_register_test( struct mgsl_struct *info );
static bool mgsl_irq_test( struct mgsl_struct *info );
static bool mgsl_dma_test( struct mgsl_struct *info );
static bool mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );

/*
 * device and resource management routines
 */
static int mgsl_claim_resources(struct mgsl_struct *info);
static void mgsl_release_resources(struct mgsl_struct *info);
static void mgsl_add_device(struct mgsl_struct *info);
static struct mgsl_struct* mgsl_allocate_device(void);

/*
 * DMA buffer manipulation functions.
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
static bool mgsl_get_rx_frame( struct mgsl_struct *info );
static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);

/*
 * DMA and Shared Memory buffer allocation and formatting
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
static void mgsl_free_dma_buffers(struct mgsl_struct *info);
static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);

/*
 * Bottom half interrupt handlers
 */
static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);

/*
 * Interrupt handler routines and dispatch table.
 */
static void mgsl_isr_null( struct mgsl_struct *info );
static void mgsl_isr_transmit_data( struct mgsl_struct *info );
static void mgsl_isr_receive_data( struct mgsl_struct *info );
static void mgsl_isr_receive_status( struct mgsl_struct *info );
static void mgsl_isr_transmit_status( struct mgsl_struct *info );
static void mgsl_isr_io_pin( struct mgsl_struct *info );
static void mgsl_isr_misc( struct mgsl_struct *info );
static void mgsl_isr_receive_dma( struct mgsl_struct *info );
static void mgsl_isr_transmit_dma( struct mgsl_struct *info );

typedef void (*isr_dispatch_func)(struct mgsl_struct *);

/* Indexed by the USC interrupt vector (IVR >> 9) in mgsl_interrupt() */
static isr_dispatch_func UscIsrTable[7] =
{
	mgsl_isr_null,
	mgsl_isr_misc,
	mgsl_isr_io_pin,
	mgsl_isr_transmit_data,
	mgsl_isr_transmit_status,
	mgsl_isr_receive_data,
	mgsl_isr_receive_status
};

/*
 * ioctl call handlers
 */
static int tiocmget(struct tty_struct *tty);
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear);
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount);
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
static int mgsl_txenable(struct mgsl_struct * info, int enable);
static int mgsl_txabort(struct mgsl_struct * info);
static int mgsl_rxenable(struct mgsl_struct * info, int enable);
static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );

/* set non-zero on successful registration with PCI subsystem */
static bool pci_registered;

/*
 * Global linked list of SyncLink devices
 */
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static bool break_on_load;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor;

/*
 * Array of user specified options for ISA adapters.
 */
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);

static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";

static int synclink_init_one (struct pci_dev *dev,
				     const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);

static struct pci_device_id synclink_pci_tbl[] = {
	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);

MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name		= "synclink",
	.id_table	= synclink_pci_tbl,
	.probe		= synclink_init_one,
	.remove		= synclink_remove_one,
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256

static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 */
static void* mgsl_get_text_ptr(void)
{
	/* Deliberately returns its own address (function pointer as void*)
	 * so a debugger can locate the module's .text section. */
	return mgsl_get_text_ptr;
}

/*
 * mgsl_paranoia_check()
 *
 * Sanity-check a device instance pointer before use.
 *
 * Arguments:	info	device instance pointer to validate
 *		name	tty device name (for diagnostics)
 *		routine	name of calling routine (for diagnostics)
 * Return Value: 0 if info looks valid, 1 if NULL or bad magic
 */
static inline int mgsl_paranoia_check(struct mgsl_struct *info,
					char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for mgsl struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null mgsl_struct for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#else
	/* Without paranoia checks, only guard against a NULL pointer. */
	if (!info)
		return 1;
#endif
	return 0;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */
static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;

	if (!tty)
		return;
	/* hold a reference so the ldisc can't be torn down mid-call */
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* mgsl_stop()		throttle (stop) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_stop(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_stop(%s)\n",info->device_name);

	/* hardware access must be serialized against the ISR */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->tx_enabled)
		usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
} /* end of mgsl_stop() */

/* mgsl_start()		release (start) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_start(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_start(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_enabled)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
} /* end of mgsl_start() */

/*
 * Bottom half work queue access functions
 */

/* mgsl_bh_action()	Return next bottom half action to perform.
 *
 * Consumes (clears) the returned pending flag under the IRQ spinlock,
 * in priority order: receive, transmit, status.
 *
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete so the ISR may requeue it */
		info->bh_running = false;
		info->bh_requested = false;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 * Perform bottom half processing of work items queued by ISR.
 */
static void mgsl_bh_handler(struct work_struct *work)
{
	struct mgsl_struct *info =
		container_of(work, struct mgsl_struct, task);
	int action;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = true;

	/* drain pending work items until mgsl_bh_action() reports none */
	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

/*
 * Bottom half receive processing: drain completed receive frames,
 * restarting the receiver first if the ISR flagged an RCC underrun.
 */
static void mgsl_bh_receive(struct mgsl_struct *info)
{
	/* HDLC mode and raw mode use different frame extraction routines */
	bool (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do {
		if (info->rx_rcc_underrun) {
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

/*
 * Bottom half transmit processing: wake up writers, and resume
 * loop-mode echoing if a send-done was requested while transmitting.
 */
static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->port.tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/*
 * Bottom half status processing: reset the per-signal interrupt
 * rate-limit counters used by mgsl_isr_io_pin().
 */
static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	/* received abort while waiting to insert into an SDLC loop:
	 * switch the hardware back to echoing RxD to TxD */
	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = false;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	/* acknowledge and unlatch before returning from the ISR */
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );
} /* end of mgsl_isr_receive_status() */

/* mgsl_isr_transmit_status()
 *
 *	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 *	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave	*/
		/* the TxFifo with data from the aborted frame	*/
		/* so purge the TxFifo. Also shutdown the DMA	*/
		/* channel in case there is data remaining in	*/
		/* the DMA buffer				*/

		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	/* classify completion for the statistics counters; anything
	 * other than EOF/underrun/abort is counted as an underrun */
	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	/* honor a deferred RTS drop now that the frame has completed */
	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = false;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		if (info->port.tty->stopped || info->port.tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}
} /* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 * 	Service an Input/Output pin interrupt.
 The type of
 * 	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters; each signal's interrupt is
		 * disabled once its change count hits IO_PIN_SHUTDOWN_LIMIT
		 * (a storm guard — counters are reset by mgsl_bh_status()) */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			/* mirror carrier state to the generic HDLC netdev */
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);

		/* carrier detect handling: wake blocked opens on DCD up,
		 * hang up the tty on DCD down */
		if ( (info->port.flags & ASYNC_CHECK_CD) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->port.open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->port.tty)
					tty_hangup(info->port.tty);
			}
		}

		/* CTS hardware flow control: start/stop the transmitter
		 * as the modem raises/lowers CTS */
		if (tty_port_cts_enabled(&info->port) &&
		    (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->port.tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = true;
	}
} /* end of mgsl_isr_io_pin() */

/* mgsl_isr_transmit_data()
 *
 * 	Service a transmit data interrupt (async mode only).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_data( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
			__FILE__,__LINE__,info->xmit_cnt);

	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );

	/* flow-controlled: stop sending instead of refilling the FIFO */
	if (info->port.tty->stopped || info->port.tty->hw_stopped) {
		usc_stop_transmitter(info);
		return;
	}

	if ( info->xmit_cnt )
		usc_load_txfifo( info );
	else
		info->tx_active = false;

	/* buffer is draining; let writers queue more data via the BH */
	if (info->xmit_cnt < WAKEUP_CHARS)
		info->pending_bh |= BH_TRANSMIT;
} /* end of mgsl_isr_transmit_data() */

/* mgsl_isr_receive_data()
 *
 * 	Service a receive data interrupt. This occurs
 * 	when operating in asynchronous interrupt transfer mode.
 *	The receive data FIFO is flushed to the receive data buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
	int Fifocount;
	u16 status;
	int work = 0;
	unsigned char DataByte;
	struct mgsl_icount *icount = &info->icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_data\n",
			__FILE__,__LINE__);

	usc_ClearIrqPendingBits( info, RECEIVE_DATA );

	/* select FIFO status for RICR readback */
	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );

	/* clear the Wordstatus bit so that status readback */
	/* only reflects the status of this byte */
	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));

	/* flush the receive FIFO: upper byte of RICR holds the count of
	 * bytes currently in the RxFIFO */

	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
		int flag;

		/* read one byte from RxFIFO */
		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
		      info->io_base + CCAR );
		DataByte = inb( info->io_base + CCAR );

		/* get the status of the received byte */
		status = usc_InReg(info, RCSR);
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);

		icount->rx++;

		flag = 0;
		if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
				RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
			printk("rxerr=%04X\n",status);
			/* update error statistics */
			if ( status & RXSTATUS_BREAK_RECEIVED ) {
				/* break implies framing/parity flags; ignore them */
				status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
				icount->brk++;
			} else if (status & RXSTATUS_PARITY_ERROR)
				icount->parity++;
			else if (status & RXSTATUS_FRAMING_ERROR)
				icount->frame++;
			else if (status & RXSTATUS_OVERRUN) {
				/* must issue purge fifo cmd before */
				/* 16C32 accepts more receive chars */
				usc_RTCmd(info,RTCmd_PurgeRxFifo);
				icount->overrun++;
			}

			/* discard char if tty control flags say so */
			if (status & info->ignore_status_mask)
				continue;

			status &= info->read_status_mask;

			/* map hardware status to tty flip-buffer flag */
			if (status & RXSTATUS_BREAK_RECEIVED) {
				flag = TTY_BREAK;
				if (info->port.flags & ASYNC_SAK)
					do_SAK(info->port.tty);
			} else if (status & RXSTATUS_PARITY_ERROR)
				flag = TTY_PARITY;
			else if (status & RXSTATUS_FRAMING_ERROR)
				flag = TTY_FRAME;
		}	/* end of if (error) */
		tty_insert_flip_char(&info->port, DataByte, flag);
		if (status & RXSTATUS_OVERRUN) {
			/* Overrun is special, since it's
			 * reported immediately, and doesn't
			 * affect the current character
			 */
			work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
		}
	}

	if ( debug_level >= DEBUG_LEVEL_ISR ) {
		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
			__FILE__,__LINE__,icount->rx,icount->brk,
			icount->parity,icount->frame,icount->overrun);
	}

	if(work)
		tty_flip_buffer_push(&info->port);
}

/* mgsl_isr_misc()
 *
 * 	Service a miscellaneous interrupt source.
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_misc( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_misc status=%04X\n",
			__FILE__,__LINE__,status);

	/* RCC underrun in HDLC mode: shut the receiver down and let the
	 * bottom half restart it (see mgsl_bh_receive) */
	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
	    (info->params.mode == MGSL_MODE_HDLC)) {

		/* turn off receiver and rx DMA */
		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
		usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);

		/* schedule BH handler to restart receiver */
		info->pending_bh |= BH_RECEIVE;
		info->rx_rcc_underrun = true;
	}

	usc_ClearIrqPendingBits( info, MISC );
	usc_UnlatchMiscstatusBits( info, status );
} /* end of mgsl_isr_misc() */

/* mgsl_isr_null()
 *
 * 	Services undefined interrupt vectors from the
 * 	USC.
 (hence this function SHOULD never be called)
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_null( struct mgsl_struct *info )
{
	/* intentionally empty: placeholder for unused vector slots */
} /* end of mgsl_isr_null() */

/* mgsl_isr_receive_dma()
 *
 * 	Service a receive DMA channel interrupt.
 * 	For this driver there are two sources of receive DMA interrupts
 * 	as identified in the Receive DMA mode Register (RDMR):
 *
 * 	BIT3	EOA/EOL		End of List, all receive buffers in receive
 * 				buffer list have been filled (no more free
 * 				buffers available). The DMA controller has
 * 				shut down.
 *
 * 	BIT2	EOB		End of Buffer. This interrupt occurs when a
 * 				receive DMA buffer is terminated in response
 * 				to completion of a good frame or a frame with
 * 				errors. The status of the frame is stored in
 * 				the buffer entry in the list of receive buffer
 * 				entries.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* frame extraction is done in the bottom half */
	info->pending_bh |= BH_RECEIVE;

	/* BIT3 = end of list: no free buffers, receiver has overflowed */
	if ( status & BIT3 ) {
		info->rx_overflow = true;
		info->icount.buf_overrun++;
	}
} /* end of mgsl_isr_receive_dma() */

/* mgsl_isr_transmit_dma()
 *
 *	This function services a transmit DMA channel interrupt.
 *
 *	For this driver there is one source of transmit DMA interrupts
 *	as identified in the Transmit DMA Mode Register (TDMR):
 *
 *     	BIT2  EOB       End of Buffer. This interrupt occurs when a
 *     			transmit DMA buffer has been emptied.
 *
 *     	The driver maintains enough transmit DMA buffers to hold at least
 *     	one max frame size transmit frame.
 When operating in a buffered
 *	transmit mode, there may be enough transmit DMA buffers to hold at
 *	least two or more max frame size frames. On an EOB condition,
 *	determine if there are any queued transmit buffers and copy into
 *	transmit DMA buffers if we have room.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* BIT2 = EOB: a transmit DMA buffer has been emptied */
	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 *  try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {

			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}
} /* end of mgsl_isr_transmit_dma() */

/* mgsl_interrupt()
 *
 *	Interrupt service routine entry point.
 *
 * Arguments:
 *
 *	irq	interrupt number that caused interrupt
 *	dev_id	device ID supplied during interrupt registration
 *
 * Return Value: IRQ_HANDLED
 */
static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
{
	struct mgsl_struct *info = dev_id;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__, __LINE__, info->irq_level);

	spin_lock(&info->irq_spinlock);

	/* loop until the hardware reports no pending serial or DMA vector */
	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector: serial vectors go through the
		 * UscIsrTable; DMA vectors are decoded from DIVR bits 10:9 */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		/* runaway interrupt source: mask everything and bail out */
		if ( info->isr_overflow ) {
			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
				__FILE__, __LINE__, info->device_name, info->irq_level);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = true;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__, __LINE__, info->irq_level);

	return IRQ_HANDLED;
} /* end of mgsl_interrupt() */

/* startup()
 *
 * 	Initialize and start device.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int startup(struct mgsl_struct * info)
{
	int retval = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);

	/* already initialized (e.g. second open of the same port) */
	if (info->port.flags & ASYNC_INITIALIZED)
		return 0;

	if (!info->xmit_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);

	/* Allocate and claim adapter resources */
	retval = mgsl_claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = mgsl_adapter_test(info);

	if ( retval ) {
		/* adapter failed diagnostics: mark the tty unusable and
		 * release everything claimed above */
  		if (capable(CAP_SYS_ADMIN) && info->port.tty)
			set_bit(TTY_IO_ERROR, &info->port.tty->flags);
		mgsl_release_resources(info);
  		return retval;
  	}

	/* program hardware for current parameters */
	mgsl_change_params(info);

	if (info->port.tty)
		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags |= ASYNC_INITIALIZED;

	return 0;
} /* end of startup() */

/* shutdown()
 *
 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void shutdown(struct mgsl_struct * info)
{
	unsigned long flags;

	if (!(info->port.flags & ASYNC_INITIALIZED))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	if (info->xmit_buf) {
		free_page((unsigned long) info->xmit_buf);
		info->xmit_buf = NULL;
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_DisableMasterIrqBit(info);
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
		TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);

	/* Disable DMAEN (Port 7, Bit 14) */
	/* This disconnects the DMA request signal from the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));

	/* Disable INTEN (Port 6, Bit12) */
	/* This disconnects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));

	/* drop modem control lines on final close if HUPCL is set */
	if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
		usc_set_serial_signals(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_release_resources(info);

	if (info->port.tty)
		set_bit(TTY_IO_ERROR, &info->port.tty->flags);

	info->port.flags &= ~ASYNC_INITIALIZED;
} /* end of shutdown() */

/*
 * mgsl_program_hw()
 *
 * Program the adapter for the current operating mode (sync/async)
 * and restart status interrupts and the receiver as appropriate.
 * Called with parameters already set up in info->params.
 */
static void mgsl_program_hw(struct mgsl_struct *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ||
	    info->netcount)
		usc_set_sync_mode(info);
	else
		usc_set_async_mode(info);

	usc_set_serial_signals(info);

	/* reset the modem-signal interrupt storm counters */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
	usc_EnableInterrupts(info, IO_PIN);
	usc_get_serial_signals(info);

	if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
		usc_start_receiver(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Reconfigure adapter based on new
parameters */ static void mgsl_change_params(struct mgsl_struct *info) { unsigned cflag; int bits_per_char; if (!info->port.tty) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_change_params(%s)\n", __FILE__,__LINE__, info->device_name ); cflag = info->port.tty->termios.c_cflag; /* if B0 rate (hangup) specified then negate RTS and DTR */ /* otherwise assert RTS and DTR */ if (cflag & CBAUD) info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; else info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); /* byte size and parity */ switch (cflag & CSIZE) { case CS5: info->params.data_bits = 5; break; case CS6: info->params.data_bits = 6; break; case CS7: info->params.data_bits = 7; break; case CS8: info->params.data_bits = 8; break; /* Never happens, but GCC is too dumb to figure it out */ default: info->params.data_bits = 7; break; } if (cflag & CSTOPB) info->params.stop_bits = 2; else info->params.stop_bits = 1; info->params.parity = ASYNC_PARITY_NONE; if (cflag & PARENB) { if (cflag & PARODD) info->params.parity = ASYNC_PARITY_ODD; else info->params.parity = ASYNC_PARITY_EVEN; #ifdef CMSPAR if (cflag & CMSPAR) info->params.parity = ASYNC_PARITY_SPACE; #endif } /* calculate number of jiffies to transmit a full * FIFO (32 bytes) at specified data rate */ bits_per_char = info->params.data_bits + info->params.stop_bits + 1; /* if port data rate is set to 460800 or less then * allow tty settings to override, otherwise keep the * current data rate. 
*/ if (info->params.data_rate <= 460800) info->params.data_rate = tty_get_baud_rate(info->port.tty); if ( info->params.data_rate ) { info->timeout = (32*HZ*bits_per_char) / info->params.data_rate; } info->timeout += HZ/50; /* Add .02 seconds of slop */ if (cflag & CRTSCTS) info->port.flags |= ASYNC_CTS_FLOW; else info->port.flags &= ~ASYNC_CTS_FLOW; if (cflag & CLOCAL) info->port.flags &= ~ASYNC_CHECK_CD; else info->port.flags |= ASYNC_CHECK_CD; /* process tty input control flags */ info->read_status_mask = RXSTATUS_OVERRUN; if (I_INPCK(info->port.tty)) info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) info->read_status_mask |= RXSTATUS_BREAK_RECEIVED; if (I_IGNPAR(info->port.tty)) info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; if (I_IGNBRK(info->port.tty)) { info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED; /* If ignoring parity and break indicators, ignore * overruns too. (For real raw support). */ if (I_IGNPAR(info->port.tty)) info->ignore_status_mask |= RXSTATUS_OVERRUN; } mgsl_program_hw(info); } /* end of mgsl_change_params() */ /* mgsl_put_char() * * Add a character to the transmit buffer. 
* * Arguments: tty pointer to tty information structure * ch character to add to transmit buffer * * Return Value: None */ static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; int ret = 0; if (debug_level >= DEBUG_LEVEL_INFO) { printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n", __FILE__, __LINE__, ch, info->device_name); } if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) return 0; if (!info->xmit_buf) return 0; spin_lock_irqsave(&info->irq_spinlock, flags); if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) { if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { info->xmit_buf[info->xmit_head++] = ch; info->xmit_head &= SERIAL_XMIT_SIZE-1; info->xmit_cnt++; ret = 1; } } spin_unlock_irqrestore(&info->irq_spinlock, flags); return ret; } /* end of mgsl_put_char() */ /* mgsl_flush_chars() * * Enable transmitter so remaining characters in the * transmit buffer are sent. * * Arguments: tty pointer to tty information structure * Return Value: None */ static void mgsl_flush_chars(struct tty_struct *tty) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n", __FILE__,__LINE__,info->device_name,info->xmit_cnt); if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars")) return; if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || !info->xmit_buf) return; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n", __FILE__,__LINE__,info->device_name ); spin_lock_irqsave(&info->irq_spinlock,flags); if (!info->tx_active) { if ( (info->params.mode == MGSL_MODE_HDLC || info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) { /* operating in synchronous (frame oriented) mode */ /* copy data from circular xmit_buf to */ /* transmit DMA buffer. 
*/ mgsl_load_tx_dma_buffer(info, info->xmit_buf,info->xmit_cnt); } usc_start_transmitter(info); } spin_unlock_irqrestore(&info->irq_spinlock,flags); } /* end of mgsl_flush_chars() */ /* mgsl_write() * * Send a block of data * * Arguments: * * tty pointer to tty information structure * buf pointer to buffer containing send data * count size of send data in bytes * * Return Value: number of characters written */ static int mgsl_write(struct tty_struct * tty, const unsigned char *buf, int count) { int c, ret = 0; struct mgsl_struct *info = tty->driver_data; unsigned long flags; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_write(%s) count=%d\n", __FILE__,__LINE__,info->device_name,count); if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) goto cleanup; if (!info->xmit_buf) goto cleanup; if ( info->params.mode == MGSL_MODE_HDLC || info->params.mode == MGSL_MODE_RAW ) { /* operating in synchronous (frame oriented) mode */ if (info->tx_active) { if ( info->params.mode == MGSL_MODE_HDLC ) { ret = 0; goto cleanup; } /* transmitter is actively sending data - * if we have multiple transmit dma and * holding buffers, attempt to queue this * frame for transmission at a later time. 
*/ if (info->tx_holding_count >= info->num_tx_holding_buffers ) { /* no tx holding buffers available */ ret = 0; goto cleanup; } /* queue transmit frame request */ ret = count; save_tx_buffer_request(info,buf,count); /* if we have sufficient tx dma buffers, * load the next buffered tx request */ spin_lock_irqsave(&info->irq_spinlock,flags); load_next_tx_holding_buffer(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); goto cleanup; } /* if operating in HDLC LoopMode and the adapter */ /* has yet to be inserted into the loop, we can't */ /* transmit */ if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) && !usc_loopmode_active(info) ) { ret = 0; goto cleanup; } if ( info->xmit_cnt ) { /* Send accumulated from send_char() calls */ /* as frame and wait before accepting more data. */ ret = 0; /* copy data from circular xmit_buf to */ /* transmit DMA buffer. */ mgsl_load_tx_dma_buffer(info, info->xmit_buf,info->xmit_cnt); if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n", __FILE__,__LINE__,info->device_name); } else { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n", __FILE__,__LINE__,info->device_name); ret = count; info->xmit_cnt = count; mgsl_load_tx_dma_buffer(info,buf,count); } } else { while (1) { spin_lock_irqsave(&info->irq_spinlock,flags); c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, SERIAL_XMIT_SIZE - info->xmit_head)); if (c <= 0) { spin_unlock_irqrestore(&info->irq_spinlock,flags); break; } memcpy(info->xmit_buf + info->xmit_head, buf, c); info->xmit_head = ((info->xmit_head + c) & (SERIAL_XMIT_SIZE-1)); info->xmit_cnt += c; spin_unlock_irqrestore(&info->irq_spinlock,flags); buf += c; count -= c; ret += c; } } if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) { spin_lock_irqsave(&info->irq_spinlock,flags); if (!info->tx_active) usc_start_transmitter(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); } cleanup: if ( debug_level 
>= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_write(%s) returning=%d\n", __FILE__,__LINE__,info->device_name,ret); return ret; } /* end of mgsl_write() */ /* mgsl_write_room() * * Return the count of free bytes in transmit buffer * * Arguments: tty pointer to tty info structure * Return Value: None */ static int mgsl_write_room(struct tty_struct *tty) { struct mgsl_struct *info = tty->driver_data; int ret; if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room")) return 0; ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; if (ret < 0) ret = 0; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_write_room(%s)=%d\n", __FILE__,__LINE__, info->device_name,ret ); if ( info->params.mode == MGSL_MODE_HDLC || info->params.mode == MGSL_MODE_RAW ) { /* operating in synchronous (frame oriented) mode */ if ( info->tx_active ) return 0; else return HDLC_MAX_FRAME_SIZE; } return ret; } /* end of mgsl_write_room() */ /* mgsl_chars_in_buffer() * * Return the count of bytes in transmit buffer * * Arguments: tty pointer to tty info structure * Return Value: None */ static int mgsl_chars_in_buffer(struct tty_struct *tty) { struct mgsl_struct *info = tty->driver_data; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_chars_in_buffer(%s)\n", __FILE__,__LINE__, info->device_name ); if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer")) return 0; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n", __FILE__,__LINE__, info->device_name,info->xmit_cnt ); if ( info->params.mode == MGSL_MODE_HDLC || info->params.mode == MGSL_MODE_RAW ) { /* operating in synchronous (frame oriented) mode */ if ( info->tx_active ) return info->max_frame_size; else return 0; } return info->xmit_cnt; } /* end of mgsl_chars_in_buffer() */ /* mgsl_flush_buffer() * * Discard all data in the send buffer * * Arguments: tty pointer to tty info structure * Return Value: None */ static void mgsl_flush_buffer(struct tty_struct *tty) { struct mgsl_struct *info = 
tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_flush_buffer(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer")) return; spin_lock_irqsave(&info->irq_spinlock,flags); info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; del_timer(&info->tx_timer); spin_unlock_irqrestore(&info->irq_spinlock,flags); tty_wakeup(tty); } /* mgsl_send_xchar() * * Send a high-priority XON/XOFF character * * Arguments: tty pointer to tty info structure * ch character to send * Return Value: None */ static void mgsl_send_xchar(struct tty_struct *tty, char ch) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_send_xchar(%s,%d)\n", __FILE__,__LINE__, info->device_name, ch ); if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar")) return; info->x_char = ch; if (ch) { /* Make sure transmit interrupts are on */ spin_lock_irqsave(&info->irq_spinlock,flags); if (!info->tx_enabled) usc_start_transmitter(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); } } /* end of mgsl_send_xchar() */ /* mgsl_throttle() * * Signal remote device to throttle send data (our receive data) * * Arguments: tty pointer to tty info structure * Return Value: None */ static void mgsl_throttle(struct tty_struct * tty) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_throttle(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle")) return; if (I_IXOFF(tty)) mgsl_send_xchar(tty, STOP_CHAR(tty)); if (tty->termios.c_cflag & CRTSCTS) { spin_lock_irqsave(&info->irq_spinlock,flags); info->serial_signals &= ~SerialSignal_RTS; usc_set_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); } } /* end of mgsl_throttle() */ /* mgsl_unthrottle() * * Signal remote device to stop throttling 
send data (our receive data) * * Arguments: tty pointer to tty info structure * Return Value: None */ static void mgsl_unthrottle(struct tty_struct * tty) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_unthrottle(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle")) return; if (I_IXOFF(tty)) { if (info->x_char) info->x_char = 0; else mgsl_send_xchar(tty, START_CHAR(tty)); } if (tty->termios.c_cflag & CRTSCTS) { spin_lock_irqsave(&info->irq_spinlock,flags); info->serial_signals |= SerialSignal_RTS; usc_set_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); } } /* end of mgsl_unthrottle() */ /* mgsl_get_stats() * * get the current serial parameters information * * Arguments: info pointer to device instance data * user_icount pointer to buffer to hold returned stats * * Return Value: 0 if success, otherwise error code */ static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_get_params(%s)\n", __FILE__,__LINE__, info->device_name); if (!user_icount) { memset(&info->icount, 0, sizeof(info->icount)); } else { mutex_lock(&info->port.mutex); COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); mutex_unlock(&info->port.mutex); if (err) return -EFAULT; } return 0; } /* end of mgsl_get_stats() */ /* mgsl_get_params() * * get the current serial parameters information * * Arguments: info pointer to device instance data * user_params pointer to buffer to hold returned params * * Return Value: 0 if success, otherwise error code */ static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_get_params(%s)\n", __FILE__,__LINE__, info->device_name); mutex_lock(&info->port.mutex); 
COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); mutex_unlock(&info->port.mutex); if (err) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n", __FILE__,__LINE__,info->device_name); return -EFAULT; } return 0; } /* end of mgsl_get_params() */ /* mgsl_set_params() * * set the serial parameters * * Arguments: * * info pointer to device instance data * new_params user buffer containing new serial params * * Return Value: 0 if success, otherwise error code */ static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params) { unsigned long flags; MGSL_PARAMS tmp_params; int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__, info->device_name ); COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); if (err) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n", __FILE__,__LINE__,info->device_name); return -EFAULT; } mutex_lock(&info->port.mutex); spin_lock_irqsave(&info->irq_spinlock,flags); memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); spin_unlock_irqrestore(&info->irq_spinlock,flags); mgsl_change_params(info); mutex_unlock(&info->port.mutex); return 0; } /* end of mgsl_set_params() */ /* mgsl_get_txidle() * * get the current transmit idle mode * * Arguments: info pointer to device instance data * idle_mode pointer to buffer to hold returned idle mode * * Return Value: 0 if success, otherwise error code */ static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_get_txidle(%s)=%d\n", __FILE__,__LINE__, info->device_name, info->idle_mode); COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); if (err) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n", __FILE__,__LINE__,info->device_name); return -EFAULT; } return 0; } 
/* end of mgsl_get_txidle() */ /* mgsl_set_txidle() service ioctl to set transmit idle mode * * Arguments: info pointer to device instance data * idle_mode new idle mode * * Return Value: 0 if success, otherwise error code */ static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__, info->device_name, idle_mode ); spin_lock_irqsave(&info->irq_spinlock,flags); info->idle_mode = idle_mode; usc_set_txidle( info ); spin_unlock_irqrestore(&info->irq_spinlock,flags); return 0; } /* end of mgsl_set_txidle() */ /* mgsl_txenable() * * enable or disable the transmitter * * Arguments: * * info pointer to device instance data * enable 1 = enable, 0 = disable * * Return Value: 0 if success, otherwise error code */ static int mgsl_txenable(struct mgsl_struct * info, int enable) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__, info->device_name, enable); spin_lock_irqsave(&info->irq_spinlock,flags); if ( enable ) { if ( !info->tx_enabled ) { usc_start_transmitter(info); /*-------------------------------------------------- * if HDLC/SDLC Loop mode, attempt to insert the * station in the 'loop' by setting CMR:13. 
Upon * receipt of the next GoAhead (RxAbort) sequence, * the OnLoop indicator (CCSR:7) should go active * to indicate that we are on the loop *--------------------------------------------------*/ if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) usc_loopmode_insert_request( info ); } } else { if ( info->tx_enabled ) usc_stop_transmitter(info); } spin_unlock_irqrestore(&info->irq_spinlock,flags); return 0; } /* end of mgsl_txenable() */ /* mgsl_txabort() abort send HDLC frame * * Arguments: info pointer to device instance data * Return Value: 0 if success, otherwise error code */ static int mgsl_txabort(struct mgsl_struct * info) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__, info->device_name); spin_lock_irqsave(&info->irq_spinlock,flags); if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) { if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) usc_loopmode_cancel_transmit( info ); else usc_TCmd(info,TCmd_SendAbort); } spin_unlock_irqrestore(&info->irq_spinlock,flags); return 0; } /* end of mgsl_txabort() */ /* mgsl_rxenable() enable or disable the receiver * * Arguments: info pointer to device instance data * enable 1 = enable, 0 = disable * Return Value: 0 if success, otherwise error code */ static int mgsl_rxenable(struct mgsl_struct * info, int enable) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__, info->device_name, enable); spin_lock_irqsave(&info->irq_spinlock,flags); if ( enable ) { if ( !info->rx_enabled ) usc_start_receiver(info); } else { if ( info->rx_enabled ) usc_stop_receiver(info); } spin_unlock_irqrestore(&info->irq_spinlock,flags); return 0; } /* end of mgsl_rxenable() */ /* mgsl_wait_event() wait for specified event to occur * * Arguments: info pointer to device instance data * mask pointer to bitmask of events to wait for * Return Value: 0 if successful and bit mask updated with * of events 
triggerred, * otherwise error code */ static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr) { unsigned long flags; int s; int rc=0; struct mgsl_icount cprev, cnow; int events; int mask; struct _input_signal_events oldsigs, newsigs; DECLARE_WAITQUEUE(wait, current); COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); if (rc) { return -EFAULT; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__, info->device_name, mask); spin_lock_irqsave(&info->irq_spinlock,flags); /* return immediately if state matches requested events */ usc_get_serial_signals(info); s = info->serial_signals; events = mask & ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); if (events) { spin_unlock_irqrestore(&info->irq_spinlock,flags); goto exit; } /* save current irq counts */ cprev = info->icount; oldsigs = info->input_signal_events; /* enable hunt and idle irqs if needed */ if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { u16 oldreg = usc_InReg(info,RICR); u16 newreg = oldreg + (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) + (mask & MgslEvent_IdleReceived ? 
RXSTATUS_IDLE_RECEIVED:0); if (oldreg != newreg) usc_OutReg(info, RICR, newreg); } set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&info->event_wait_q, &wait); spin_unlock_irqrestore(&info->irq_spinlock,flags); for(;;) { schedule(); if (signal_pending(current)) { rc = -ERESTARTSYS; break; } /* get current irq counts */ spin_lock_irqsave(&info->irq_spinlock,flags); cnow = info->icount; newsigs = info->input_signal_events; set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->irq_spinlock,flags); /* if no change, wait aborted for some reason */ if (newsigs.dsr_up == oldsigs.dsr_up && newsigs.dsr_down == oldsigs.dsr_down && newsigs.dcd_up == oldsigs.dcd_up && newsigs.dcd_down == oldsigs.dcd_down && newsigs.cts_up == oldsigs.cts_up && newsigs.cts_down == oldsigs.cts_down && newsigs.ri_up == oldsigs.ri_up && newsigs.ri_down == oldsigs.ri_down && cnow.exithunt == cprev.exithunt && cnow.rxidle == cprev.rxidle) { rc = -EIO; break; } events = mask & ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) + (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + (cnow.rxidle != cprev.rxidle ? 
MgslEvent_IdleReceived:0) ); if (events) break; cprev = cnow; oldsigs = newsigs; } remove_wait_queue(&info->event_wait_q, &wait); set_current_state(TASK_RUNNING); if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { spin_lock_irqsave(&info->irq_spinlock,flags); if (!waitqueue_active(&info->event_wait_q)) { /* disable enable exit hunt mode/idle rcvd IRQs */ usc_OutReg(info, RICR, usc_InReg(info,RICR) & ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)); } spin_unlock_irqrestore(&info->irq_spinlock,flags); } exit: if ( rc == 0 ) PUT_USER(rc, events, mask_ptr); return rc; } /* end of mgsl_wait_event() */ static int modem_input_wait(struct mgsl_struct *info,int arg) { unsigned long flags; int rc; struct mgsl_icount cprev, cnow; DECLARE_WAITQUEUE(wait, current); /* save current irq counts */ spin_lock_irqsave(&info->irq_spinlock,flags); cprev = info->icount; add_wait_queue(&info->status_event_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->irq_spinlock,flags); for(;;) { schedule(); if (signal_pending(current)) { rc = -ERESTARTSYS; break; } /* get new irq counts */ spin_lock_irqsave(&info->irq_spinlock,flags); cnow = info->icount; set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->irq_spinlock,flags); /* if no change, wait aborted for some reason */ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { rc = -EIO; break; } /* check for change in caller specified modem input */ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { rc = 0; break; } cprev = cnow; } remove_wait_queue(&info->status_event_wait_q, &wait); set_current_state(TASK_RUNNING); return rc; } /* return the state of the serial control and status signals */ static int tiocmget(struct tty_struct *tty) { struct mgsl_struct *info = tty->driver_data; unsigned int 
result; unsigned long flags; spin_lock_irqsave(&info->irq_spinlock,flags); usc_get_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0); if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s tiocmget() value=%08X\n", __FILE__,__LINE__, info->device_name, result ); return result; } /* set modem control signals (DTR/RTS) */ static int tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):%s tiocmset(%x,%x)\n", __FILE__,__LINE__,info->device_name, set, clear); if (set & TIOCM_RTS) info->serial_signals |= SerialSignal_RTS; if (set & TIOCM_DTR) info->serial_signals |= SerialSignal_DTR; if (clear & TIOCM_RTS) info->serial_signals &= ~SerialSignal_RTS; if (clear & TIOCM_DTR) info->serial_signals &= ~SerialSignal_DTR; spin_lock_irqsave(&info->irq_spinlock,flags); usc_set_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); return 0; } /* mgsl_break() Set or clear transmit break condition * * Arguments: tty pointer to tty instance data * break_state -1=set break condition, 0=clear * Return Value: error code */ static int mgsl_break(struct tty_struct *tty, int break_state) { struct mgsl_struct * info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_break(%s,%d)\n", __FILE__,__LINE__, info->device_name, break_state); if (mgsl_paranoia_check(info, tty->name, "mgsl_break")) return -EINVAL; spin_lock_irqsave(&info->irq_spinlock,flags); if (break_state == -1) 
usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7)); else usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7)); spin_unlock_irqrestore(&info->irq_spinlock,flags); return 0; } /* end of mgsl_break() */ /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) * Return: write counters to the user passed counter struct * NB: both 1->0 and 0->1 transitions are counted except for * RI where only 0->1 is counted. */ static int msgl_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct mgsl_struct * info = tty->driver_data; struct mgsl_icount cnow; /* kernel counter temps */ unsigned long flags; spin_lock_irqsave(&info->irq_spinlock,flags); cnow = info->icount; spin_unlock_irqrestore(&info->irq_spinlock,flags); icount->cts = cnow.cts; icount->dsr = cnow.dsr; icount->rng = cnow.rng; icount->dcd = cnow.dcd; icount->rx = cnow.rx; icount->tx = cnow.tx; icount->frame = cnow.frame; icount->overrun = cnow.overrun; icount->parity = cnow.parity; icount->brk = cnow.brk; icount->buf_overrun = cnow.buf_overrun; return 0; } /* mgsl_ioctl() Service an IOCTL request * * Arguments: * * tty pointer to tty instance data * cmd IOCTL command code * arg command argument/context * * Return Value: 0 if success, otherwise error code */ static int mgsl_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct mgsl_struct * info = tty->driver_data; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__, info->device_name, cmd ); if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl")) return -ENODEV; if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCMIWAIT)) { if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; } return mgsl_ioctl_common(info, cmd, arg); } static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; switch (cmd) { case MGSL_IOCGPARAMS: return mgsl_get_params(info, argp); case MGSL_IOCSPARAMS: 
return mgsl_set_params(info, argp); case MGSL_IOCGTXIDLE: return mgsl_get_txidle(info, argp); case MGSL_IOCSTXIDLE: return mgsl_set_txidle(info,(int)arg); case MGSL_IOCTXENABLE: return mgsl_txenable(info,(int)arg); case MGSL_IOCRXENABLE: return mgsl_rxenable(info,(int)arg); case MGSL_IOCTXABORT: return mgsl_txabort(info); case MGSL_IOCGSTATS: return mgsl_get_stats(info, argp); case MGSL_IOCWAITEVENT: return mgsl_wait_event(info, argp); case MGSL_IOCLOOPTXDONE: return mgsl_loopmode_send_done(info); /* Wait for modem input (DCD,RI,DSR,CTS) change * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS) */ case TIOCMIWAIT: return modem_input_wait(info,(int)arg); default: return -ENOIOCTLCMD; } return 0; } /* mgsl_set_termios() * * Set new termios settings * * Arguments: * * tty pointer to tty structure * termios pointer to buffer to hold returned old termios * * Return Value: None */ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__, tty->driver->name ); mgsl_change_params(info); /* Handle transition to B0 status */ if (old_termios->c_cflag & CBAUD && !(tty->termios.c_cflag & CBAUD)) { info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); spin_lock_irqsave(&info->irq_spinlock,flags); usc_set_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); } /* Handle transition away from B0 status */ if (!(old_termios->c_cflag & CBAUD) && tty->termios.c_cflag & CBAUD) { info->serial_signals |= SerialSignal_DTR; if (!(tty->termios.c_cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags)) { info->serial_signals |= SerialSignal_RTS; } spin_lock_irqsave(&info->irq_spinlock,flags); usc_set_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); } /* Handle turning off CRTSCTS */ if (old_termios->c_cflag & CRTSCTS && !(tty->termios.c_cflag & 
CRTSCTS)) { tty->hw_stopped = 0; mgsl_start(tty); } } /* end of mgsl_set_termios() */ /* mgsl_close() * * Called when port is closed. Wait for remaining data to be * sent. Disable port and free resources. * * Arguments: * * tty pointer to open tty structure * filp pointer to open file object * * Return Value: None */ static void mgsl_close(struct tty_struct *tty, struct file * filp) { struct mgsl_struct * info = tty->driver_data; if (mgsl_paranoia_check(info, tty->name, "mgsl_close")) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_close(%s) entry, count=%d\n", __FILE__,__LINE__, info->device_name, info->port.count); if (tty_port_close_start(&info->port, tty, filp) == 0) goto cleanup; mutex_lock(&info->port.mutex); if (info->port.flags & ASYNC_INITIALIZED) mgsl_wait_until_sent(tty, info->timeout); mgsl_flush_buffer(tty); tty_ldisc_flush(tty); shutdown(info); mutex_unlock(&info->port.mutex); tty_port_close_end(&info->port, tty); info->port.tty = NULL; cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__, tty->driver->name, info->port.count); } /* end of mgsl_close() */ /* mgsl_wait_until_sent() * * Wait until the transmitter is empty. * * Arguments: * * tty pointer to tty info structure * timeout time to wait for send completion * * Return Value: None */ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) { struct mgsl_struct * info = tty->driver_data; unsigned long orig_jiffies, char_time; if (!info ) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_wait_until_sent(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent")) return; if (!(info->port.flags & ASYNC_INITIALIZED)) goto exit; orig_jiffies = jiffies; /* Set check interval to 1/5 of estimated time to * send a character, and make it at least 1. The check * interval should also be less than the timeout. 
 * Note: use tight timings here to satisfy the NIST-PCTS.
 */

	/* Estimate the time to send one character and poll at 1/5 of that
	 * (minimum one jiffy); never poll less often than the caller's
	 * timeout. */
	if ( info->params.data_rate ) {
	       	char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;	/* data rate unknown, poll every jiffy */

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* Synchronous modes: wait until the transmitter goes idle
		 * (tx_active is cleared when the frame completes). */
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		/* Async mode: poll the USC transmit status register until all
		 * data has left the shift register, or tx is disabled. */
		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
			info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
			 __FILE__,__LINE__, info->device_name );

}	/* end of mgsl_wait_until_sent() */

/* mgsl_hangup()
 *
 *	Called by tty_hangup() when a hangup is signaled.
 *	This is the same as to closing all open files for the port.
 *
 * Arguments:		tty	pointer to associated tty object
 * Return Value:	None
 */
static void mgsl_hangup(struct tty_struct *tty)
{
	struct mgsl_struct * info = tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_hangup(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
		return;

	/* Drop any buffered tx data and shut the hardware down. */
	mgsl_flush_buffer(tty);
	shutdown(info);

	/* Force the port back to the fully-closed state so a subsequent
	 * open starts from scratch. */
	info->port.count = 0;
	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
	info->port.tty = NULL;

	/* Wake any opens blocked in block_til_ready(). */
	wake_up_interruptible(&info->port.open_wait);

}	/* end of mgsl_hangup() */

/*
 * carrier_raised()
 *
 *	Return true if carrier is raised
 */
static int carrier_raised(struct tty_port *port)
{
	unsigned long flags;
	struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);

	/* Refresh serial_signals from the hardware under the IRQ lock. */
	spin_lock_irqsave(&info->irq_spinlock, flags);
 	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	return (info->serial_signals & SerialSignal_DCD) ?
1 : 0; } static void dtr_rts(struct tty_port *port, int on) { struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); unsigned long flags; spin_lock_irqsave(&info->irq_spinlock,flags); if (on) info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; else info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); usc_set_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); } /* block_til_ready() * * Block the current process until the specified port * is ready to be opened. * * Arguments: * * tty pointer to tty info structure * filp pointer to open file object * info pointer to device instance data * * Return Value: 0 if success, otherwise error code */ static int block_til_ready(struct tty_struct *tty, struct file * filp, struct mgsl_struct *info) { DECLARE_WAITQUEUE(wait, current); int retval; bool do_clocal = false; bool extra_count = false; unsigned long flags; int dcd; struct tty_port *port = &info->port; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready on %s\n", __FILE__,__LINE__, tty->driver->name ); if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ /* nonblock mode is set or port is not enabled */ port->flags |= ASYNC_NORMAL_ACTIVE; return 0; } if (tty->termios.c_cflag & CLOCAL) do_clocal = true; /* Wait for carrier detect and the line to become * free (i.e., not in use by the callout). While we are in * this loop, port->count is dropped by one, so that * mgsl_close() knows when to free things. We restore it upon * exit, either normal or abnormal. 
*/ retval = 0; add_wait_queue(&port->open_wait, &wait); if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready before block on %s count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); spin_lock_irqsave(&info->irq_spinlock, flags); if (!tty_hung_up_p(filp)) { extra_count = true; port->count--; } spin_unlock_irqrestore(&info->irq_spinlock, flags); port->blocked_open++; while (1) { if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags)) tty_port_raise_dtr_rts(port); set_current_state(TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ retval = (port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS; break; } dcd = tty_port_carrier_raised(&info->port); if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd)) break; if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready blocking on %s count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); tty_unlock(tty); schedule(); tty_lock(tty); } set_current_state(TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); /* FIXME: Racy on hangup during close wait */ if (extra_count) port->count++; port->blocked_open--; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready after blocking on %s count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); if (!retval) port->flags |= ASYNC_NORMAL_ACTIVE; return retval; } /* end of block_til_ready() */ static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty) { struct mgsl_struct *info; int line = tty->index; /* verify range of specified line number */ if (line >= mgsl_device_count) { printk("%s(%d):mgsl_open with invalid line #%d.\n", __FILE__, __LINE__, line); return -ENODEV; } /* find the info structure for the specified line */ info = mgsl_device_list; while (info && info->line != line) info = info->next_device; if (mgsl_paranoia_check(info, tty->name, "mgsl_open")) return -ENODEV; 
tty->driver_data = info; return tty_port_install(&info->port, driver, tty); } /* mgsl_open() * * Called when a port is opened. Init and enable port. * Perform serial-specific initialization for the tty structure. * * Arguments: tty pointer to tty info structure * filp associated file pointer * * Return Value: 0 if success, otherwise error code */ static int mgsl_open(struct tty_struct *tty, struct file * filp) { struct mgsl_struct *info = tty->driver_data; unsigned long flags; int retval; info->port.tty = tty; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_open(%s), old ref count = %d\n", __FILE__,__LINE__,tty->driver->name, info->port.count); /* If port is closing, signal caller to try again */ if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){ if (info->port.flags & ASYNC_CLOSING) interruptible_sleep_on(&info->port.close_wait); retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); goto cleanup; } info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0; spin_lock_irqsave(&info->netlock, flags); if (info->netcount) { retval = -EBUSY; spin_unlock_irqrestore(&info->netlock, flags); goto cleanup; } info->port.count++; spin_unlock_irqrestore(&info->netlock, flags); if (info->port.count == 1) { /* 1st open on this device, init hardware */ retval = startup(info); if (retval < 0) goto cleanup; } retval = block_til_ready(tty, filp, info); if (retval) { if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready(%s) returned %d\n", __FILE__,__LINE__, info->device_name, retval); goto cleanup; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgsl_open(%s) success\n", __FILE__,__LINE__, info->device_name); retval = 0; cleanup: if (retval) { if (tty->count == 1) info->port.tty = NULL; /* tty layer will release tty struct */ if(info->port.count) info->port.count--; } return retval; } /* end of mgsl_open() */ /* * /proc fs routines.... 
*/ static inline void line_info(struct seq_file *m, struct mgsl_struct *info) { char stat_buf[30]; unsigned long flags; if (info->bus_type == MGSL_BUS_TYPE_PCI) { seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X", info->device_name, info->io_base, info->irq_level, info->phys_memory_base, info->phys_lcr_base); } else { seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d", info->device_name, info->io_base, info->irq_level, info->dma_level); } /* output current serial signal states */ spin_lock_irqsave(&info->irq_spinlock,flags); usc_get_serial_signals(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); stat_buf[0] = 0; stat_buf[1] = 0; if (info->serial_signals & SerialSignal_RTS) strcat(stat_buf, "|RTS"); if (info->serial_signals & SerialSignal_CTS) strcat(stat_buf, "|CTS"); if (info->serial_signals & SerialSignal_DTR) strcat(stat_buf, "|DTR"); if (info->serial_signals & SerialSignal_DSR) strcat(stat_buf, "|DSR"); if (info->serial_signals & SerialSignal_DCD) strcat(stat_buf, "|CD"); if (info->serial_signals & SerialSignal_RI) strcat(stat_buf, "|RI"); if (info->params.mode == MGSL_MODE_HDLC || info->params.mode == MGSL_MODE_RAW ) { seq_printf(m, " HDLC txok:%d rxok:%d", info->icount.txok, info->icount.rxok); if (info->icount.txunder) seq_printf(m, " txunder:%d", info->icount.txunder); if (info->icount.txabort) seq_printf(m, " txabort:%d", info->icount.txabort); if (info->icount.rxshort) seq_printf(m, " rxshort:%d", info->icount.rxshort); if (info->icount.rxlong) seq_printf(m, " rxlong:%d", info->icount.rxlong); if (info->icount.rxover) seq_printf(m, " rxover:%d", info->icount.rxover); if (info->icount.rxcrc) seq_printf(m, " rxcrc:%d", info->icount.rxcrc); } else { seq_printf(m, " ASYNC tx:%d rx:%d", info->icount.tx, info->icount.rx); if (info->icount.frame) seq_printf(m, " fe:%d", info->icount.frame); if (info->icount.parity) seq_printf(m, " pe:%d", info->icount.parity); if (info->icount.brk) seq_printf(m, " brk:%d", info->icount.brk); if (info->icount.overrun) 
seq_printf(m, " oe:%d", info->icount.overrun); } /* Append serial signal status to end */ seq_printf(m, " %s\n", stat_buf+1); seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", info->tx_active,info->bh_requested,info->bh_running, info->pending_bh); spin_lock_irqsave(&info->irq_spinlock,flags); { u16 Tcsr = usc_InReg( info, TCSR ); u16 Tdmr = usc_InDmaReg( info, TDMR ); u16 Ticr = usc_InReg( info, TICR ); u16 Rscr = usc_InReg( info, RCSR ); u16 Rdmr = usc_InDmaReg( info, RDMR ); u16 Ricr = usc_InReg( info, RICR ); u16 Icr = usc_InReg( info, ICR ); u16 Dccr = usc_InReg( info, DCCR ); u16 Tmr = usc_InReg( info, TMR ); u16 Tccr = usc_InReg( info, TCCR ); u16 Ccar = inw( info->io_base + CCAR ); seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n" "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n", Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar ); } spin_unlock_irqrestore(&info->irq_spinlock,flags); } /* Called to print information about devices */ static int mgsl_proc_show(struct seq_file *m, void *v) { struct mgsl_struct *info; seq_printf(m, "synclink driver:%s\n", driver_version); info = mgsl_device_list; while( info ) { line_info(m, info); info = info->next_device; } return 0; } static int mgsl_proc_open(struct inode *inode, struct file *file) { return single_open(file, mgsl_proc_show, NULL); } static const struct file_operations mgsl_proc_fops = { .owner = THIS_MODULE, .open = mgsl_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* mgsl_allocate_dma_buffers() * * Allocate and format DMA buffers (ISA adapter) * or format shared memory buffers (PCI adapter). * * Arguments: info pointer to device instance data * Return Value: 0 if success, otherwise error */ static int mgsl_allocate_dma_buffers(struct mgsl_struct *info) { unsigned short BuffersPerFrame; info->last_mem_alloc = 0; /* Calculate the number of DMA buffers necessary to hold the */ /* largest allowable frame size. 
Note: If the max frame size is */ /* not an even multiple of the DMA buffer size then we need to */ /* round the buffer count per frame up one. */ BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE); if ( info->max_frame_size % DMABUFFERSIZE ) BuffersPerFrame++; if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { /* * The PCI adapter has 256KBytes of shared memory to use. * This is 64 PAGE_SIZE buffers. * * The first page is used for padding at this time so the * buffer list does not begin at offset 0 of the PCI * adapter's shared memory. * * The 2nd page is used for the buffer list. A 4K buffer * list can hold 128 DMA_BUFFER structures at 32 bytes * each. * * This leaves 62 4K pages. * * The next N pages are used for transmit frame(s). We * reserve enough 4K page blocks to hold the required * number of transmit dma buffers (num_tx_dma_buffers), * each of MaxFrameSize size. * * Of the remaining pages (62-N), determine how many can * be used to receive full MaxFrameSize inbound frames */ info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame; info->rx_buffer_count = 62 - info->tx_buffer_count; } else { /* Calculate the number of PAGE_SIZE buffers needed for */ /* receive and transmit DMA buffers. */ /* Calculate the number of DMA buffers necessary to */ /* hold 7 max size receive frames and one max size transmit frame. */ /* The receive buffer count is bumped by one so we avoid an */ /* End of List condition if all receive buffers are used when */ /* using linked list DMA buffers. 
*/ info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame; info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6; /* * limit total TxBuffers & RxBuffers to 62 4K total * (ala PCI Allocation) */ if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 ) info->rx_buffer_count = 62 - info->tx_buffer_count; } if ( debug_level >= DEBUG_LEVEL_INFO ) printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n", __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count); if ( mgsl_alloc_buffer_list_memory( info ) < 0 || mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 || mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 || mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 || mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) { printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__); return -ENOMEM; } mgsl_reset_rx_dma_buffers( info ); mgsl_reset_tx_dma_buffers( info ); return 0; } /* end of mgsl_allocate_dma_buffers() */ /* * mgsl_alloc_buffer_list_memory() * * Allocate a common DMA buffer for use as the * receive and transmit buffer lists. * * A buffer list is a set of buffer entries where each entry contains * a pointer to an actual buffer and a pointer to the next buffer entry * (plus some other info about the buffer). * * The buffer entries for a list are built to form a circular list so * that when the entire list has been traversed you start back at the * beginning. * * This function allocates memory for just the buffer entries. * The links (pointer to next entry) are filled in with the physical * address of the next entry so the adapter can navigate the list * using bus master DMA. The pointers to the actual buffers are filled * out later when the actual buffers are allocated. 
* * Arguments: info pointer to device instance data * Return Value: 0 if success, otherwise error */ static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info ) { unsigned int i; if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { /* PCI adapter uses shared memory. */ info->buffer_list = info->memory_base + info->last_mem_alloc; info->buffer_list_phys = info->last_mem_alloc; info->last_mem_alloc += BUFFERLISTSIZE; } else { /* ISA adapter uses system memory. */ /* The buffer lists are allocated as a common buffer that both */ /* the processor and adapter can access. This allows the driver to */ /* inspect portions of the buffer while other portions are being */ /* updated by the adapter using Bus Master DMA. */ info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL); if (info->buffer_list == NULL) return -ENOMEM; info->buffer_list_phys = (u32)(info->buffer_list_dma_addr); } /* We got the memory for the buffer entry lists. */ /* Initialize the memory block to all zeros. */ memset( info->buffer_list, 0, BUFFERLISTSIZE ); /* Save virtual address pointers to the receive and */ /* transmit buffer lists. (Receive 1st). These pointers will */ /* be used by the processor to access the lists. */ info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; info->tx_buffer_list += info->rx_buffer_count; /* * Build the links for the buffer entry lists such that * two circular lists are built. (Transmit and Receive). * * Note: the links are physical addresses * which are read by the adapter to determine the next * buffer entry to use. 
*/ for ( i = 0; i < info->rx_buffer_count; i++ ) { /* calculate and store physical address of this buffer entry */ info->rx_buffer_list[i].phys_entry = info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY)); /* calculate and store physical address of */ /* next entry in cirular list of entries */ info->rx_buffer_list[i].link = info->buffer_list_phys; if ( i < info->rx_buffer_count - 1 ) info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); } for ( i = 0; i < info->tx_buffer_count; i++ ) { /* calculate and store physical address of this buffer entry */ info->tx_buffer_list[i].phys_entry = info->buffer_list_phys + ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY)); /* calculate and store physical address of */ /* next entry in cirular list of entries */ info->tx_buffer_list[i].link = info->buffer_list_phys + info->rx_buffer_count * sizeof(DMABUFFERENTRY); if ( i < info->tx_buffer_count - 1 ) info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); } return 0; } /* end of mgsl_alloc_buffer_list_memory() */ /* Free DMA buffers allocated for use as the * receive and transmit buffer lists. * Warning: * * The data transfer buffers associated with the buffer list * MUST be freed before freeing the buffer list itself because * the buffer list contains the information necessary to free * the individual buffers! */ static void mgsl_free_buffer_list_memory( struct mgsl_struct *info ) { if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI) dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr); info->buffer_list = NULL; info->rx_buffer_list = NULL; info->tx_buffer_list = NULL; } /* end of mgsl_free_buffer_list_memory() */ /* * mgsl_alloc_frame_memory() * * Allocate the frame DMA buffers used by the specified buffer list. * Each DMA buffer will be one memory page in size. This is necessary * because memory can fragment enough that it may be impossible * contiguous pages. 
* * Arguments: * * info pointer to device instance data * BufferList pointer to list of buffer entries * Buffercount count of buffer entries in buffer list * * Return Value: 0 if success, otherwise -ENOMEM */ static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount) { int i; u32 phys_addr; /* Allocate page sized buffers for the receive buffer list */ for ( i = 0; i < Buffercount; i++ ) { if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { /* PCI adapter uses shared memory buffers. */ BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc; phys_addr = info->last_mem_alloc; info->last_mem_alloc += DMABUFFERSIZE; } else { /* ISA adapter uses system memory. */ BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL); if (BufferList[i].virt_addr == NULL) return -ENOMEM; phys_addr = (u32)(BufferList[i].dma_addr); } BufferList[i].phys_addr = phys_addr; } return 0; } /* end of mgsl_alloc_frame_memory() */ /* * mgsl_free_frame_memory() * * Free the buffers associated with * each buffer entry of a buffer list. 
* * Arguments: * * info pointer to device instance data * BufferList pointer to list of buffer entries * Buffercount count of buffer entries in buffer list * * Return Value: None */ static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount) { int i; if ( BufferList ) { for ( i = 0 ; i < Buffercount ; i++ ) { if ( BufferList[i].virt_addr ) { if ( info->bus_type != MGSL_BUS_TYPE_PCI ) dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr); BufferList[i].virt_addr = NULL; } } } } /* end of mgsl_free_frame_memory() */ /* mgsl_free_dma_buffers() * * Free DMA buffers * * Arguments: info pointer to device instance data * Return Value: None */ static void mgsl_free_dma_buffers( struct mgsl_struct *info ) { mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count ); mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count ); mgsl_free_buffer_list_memory( info ); } /* end of mgsl_free_dma_buffers() */ /* * mgsl_alloc_intermediate_rxbuffer_memory() * * Allocate a buffer large enough to hold max_frame_size. This buffer * is used to pass an assembled frame to the line discipline. 
* * Arguments: * * info pointer to device instance data * * Return Value: 0 if success, otherwise -ENOMEM */ static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info) { info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA); if ( info->intermediate_rxbuffer == NULL ) return -ENOMEM; /* unused flag buffer to satisfy receive_buf calling interface */ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL); if (!info->flag_buf) { kfree(info->intermediate_rxbuffer); info->intermediate_rxbuffer = NULL; return -ENOMEM; } return 0; } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */ /* * mgsl_free_intermediate_rxbuffer_memory() * * * Arguments: * * info pointer to device instance data * * Return Value: None */ static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info) { kfree(info->intermediate_rxbuffer); info->intermediate_rxbuffer = NULL; kfree(info->flag_buf); info->flag_buf = NULL; } /* end of mgsl_free_intermediate_rxbuffer_memory() */ /* * mgsl_alloc_intermediate_txbuffer_memory() * * Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size. * This buffer is used to load transmit frames into the adapter's dma transfer * buffers when there is sufficient space. 
* * Arguments: * * info pointer to device instance data * * Return Value: 0 if success, otherwise -ENOMEM */ static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info) { int i; if ( debug_level >= DEBUG_LEVEL_INFO ) printk("%s %s(%d) allocating %d tx holding buffers\n", info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers); memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers)); for ( i=0; i<info->num_tx_holding_buffers; ++i) { info->tx_holding_buffers[i].buffer = kmalloc(info->max_frame_size, GFP_KERNEL); if (info->tx_holding_buffers[i].buffer == NULL) { for (--i; i >= 0; i--) { kfree(info->tx_holding_buffers[i].buffer); info->tx_holding_buffers[i].buffer = NULL; } return -ENOMEM; } } return 0; } /* end of mgsl_alloc_intermediate_txbuffer_memory() */ /* * mgsl_free_intermediate_txbuffer_memory() * * * Arguments: * * info pointer to device instance data * * Return Value: None */ static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info) { int i; for ( i=0; i<info->num_tx_holding_buffers; ++i ) { kfree(info->tx_holding_buffers[i].buffer); info->tx_holding_buffers[i].buffer = NULL; } info->get_tx_holding_index = 0; info->put_tx_holding_index = 0; info->tx_holding_count = 0; } /* end of mgsl_free_intermediate_txbuffer_memory() */ /* * load_next_tx_holding_buffer() * * attempts to load the next buffered tx request into the * tx dma buffers * * Arguments: * * info pointer to device instance data * * Return Value: true if next buffered tx request loaded * into adapter's tx dma buffer, * false otherwise */ static bool load_next_tx_holding_buffer(struct mgsl_struct *info) { bool ret = false; if ( info->tx_holding_count ) { /* determine if we have enough tx dma buffers * to accommodate the next tx frame */ struct tx_holding_buffer *ptx = &info->tx_holding_buffers[info->get_tx_holding_index]; int num_free = num_free_tx_dma_buffers(info); int num_needed = ptx->buffer_size / DMABUFFERSIZE; if ( ptx->buffer_size % 
DMABUFFERSIZE ) ++num_needed; if (num_needed <= num_free) { info->xmit_cnt = ptx->buffer_size; mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size); --info->tx_holding_count; if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers) info->get_tx_holding_index=0; /* restart transmit timer */ mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000)); ret = true; } } return ret; } /* * save_tx_buffer_request() * * attempt to store transmit frame request for later transmission * * Arguments: * * info pointer to device instance data * Buffer pointer to buffer containing frame to load * BufferSize size in bytes of frame in Buffer * * Return Value: 1 if able to store, 0 otherwise */ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize) { struct tx_holding_buffer *ptx; if ( info->tx_holding_count >= info->num_tx_holding_buffers ) { return 0; /* all buffers in use */ } ptx = &info->tx_holding_buffers[info->put_tx_holding_index]; ptx->buffer_size = BufferSize; memcpy( ptx->buffer, Buffer, BufferSize); ++info->tx_holding_count; if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers) info->put_tx_holding_index=0; return 1; } static int mgsl_claim_resources(struct mgsl_struct *info) { if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) { printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n", __FILE__,__LINE__,info->device_name, info->io_base); return -ENODEV; } info->io_addr_requested = true; if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags, info->device_name, info ) < 0 ) { printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n", __FILE__,__LINE__,info->device_name, info->irq_level ); goto errout; } info->irq_requested = true; if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) { printk( "%s(%d):mem addr conflict device %s Addr=%08X\n", __FILE__,__LINE__,info->device_name, 
info->phys_memory_base); goto errout; } info->shared_mem_requested = true; if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) { printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset); goto errout; } info->lcr_mem_requested = true; info->memory_base = ioremap_nocache(info->phys_memory_base, 0x40000); if (!info->memory_base) { printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_memory_base ); goto errout; } if ( !mgsl_memory_test(info) ) { printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_memory_base ); goto errout; } info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE); if (!info->lcr_base) { printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n", __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); goto errout; } info->lcr_base += info->lcr_offset; } else { /* claim DMA channel */ if (request_dma(info->dma_level,info->device_name) < 0){ printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n", __FILE__,__LINE__,info->device_name, info->dma_level ); mgsl_release_resources( info ); return -ENODEV; } info->dma_requested = true; /* ISA adapter uses bus master DMA */ set_dma_mode(info->dma_level,DMA_MODE_CASCADE); enable_dma(info->dma_level); } if ( mgsl_allocate_dma_buffers(info) < 0 ) { printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n", __FILE__,__LINE__,info->device_name, info->dma_level ); goto errout; } return 0; errout: mgsl_release_resources(info); return -ENODEV; } /* end of mgsl_claim_resources() */ static void mgsl_release_resources(struct mgsl_struct *info) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_release_resources(%s) entry\n", __FILE__,__LINE__,info->device_name ); if ( info->irq_requested ) { free_irq(info->irq_level, info); 
info->irq_requested = false; } if ( info->dma_requested ) { disable_dma(info->dma_level); free_dma(info->dma_level); info->dma_requested = false; } mgsl_free_dma_buffers(info); mgsl_free_intermediate_rxbuffer_memory(info); mgsl_free_intermediate_txbuffer_memory(info); if ( info->io_addr_requested ) { release_region(info->io_base,info->io_addr_size); info->io_addr_requested = false; } if ( info->shared_mem_requested ) { release_mem_region(info->phys_memory_base,0x40000); info->shared_mem_requested = false; } if ( info->lcr_mem_requested ) { release_mem_region(info->phys_lcr_base + info->lcr_offset,128); info->lcr_mem_requested = false; } if (info->memory_base){ iounmap(info->memory_base); info->memory_base = NULL; } if (info->lcr_base){ iounmap(info->lcr_base - info->lcr_offset); info->lcr_base = NULL; } if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_release_resources(%s) exit\n", __FILE__,__LINE__,info->device_name ); } /* end of mgsl_release_resources() */ /* mgsl_add_device() * * Add the specified device instance data structure to the * global linked list of devices and increment the device count. 
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_add_device( struct mgsl_struct *info )
{
	info->next_device = NULL;
	info->line = mgsl_device_count;
	sprintf(info->device_name,"ttySL%d",info->line);

	/* Apply per-line module parameter overrides (maxframe, txdmabufs,
	 * txholdbufs), clamping the buffer counts to their valid ranges. */
	if (info->line < MAX_TOTAL_DEVICES) {
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];

		if (txdmabufs[info->line]) {
			info->num_tx_dma_buffers = txdmabufs[info->line];
			if (info->num_tx_dma_buffers < 1)
				info->num_tx_dma_buffers = 1;
		}

		if (txholdbufs[info->line]) {
			info->num_tx_holding_buffers = txholdbufs[info->line];
			if (info->num_tx_holding_buffers < 1)
				info->num_tx_holding_buffers = 1;
			else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
				info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
		}
	}

	mgsl_device_count++;

	/* Append to the tail of the global singly-linked device list. */
	if ( !mgsl_device_list )
		mgsl_device_list = info;
	else {
		struct mgsl_struct *current_dev = mgsl_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* Clamp max_frame_size to [4096, 65535] after any override above. */
	if ( info->max_frame_size < 4096 )
		info->max_frame_size = 4096;
	else if ( info->max_frame_size > 65535 )
		info->max_frame_size = 65535;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
			info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base,
		     	info->max_frame_size );
	} else {
		printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
			info->device_name, info->io_base, info->irq_level, info->dma_level,
		     	info->max_frame_size );
	}

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif

}	/* end of mgsl_add_device() */

/* tty_port callbacks used by the generic tty_port helpers */
static const struct tty_port_operations mgsl_port_ops = {
	.carrier_raised = carrier_raised,
	.dtr_rts = dtr_rts,
};


/* mgsl_allocate_device()
 *
 *	Allocate and initialize a device instance structure
 *
 * Arguments:		none
 * Return Value:	pointer to mgsl_struct if success, otherwise NULL
 */
static struct mgsl_struct*
mgsl_allocate_device(void) { struct mgsl_struct *info; info = kzalloc(sizeof(struct mgsl_struct), GFP_KERNEL); if (!info) { printk("Error can't allocate device instance data\n"); } else { tty_port_init(&info->port); info->port.ops = &mgsl_port_ops; info->magic = MGSL_MAGIC; INIT_WORK(&info->task, mgsl_bh_handler); info->max_frame_size = 4096; info->port.close_delay = 5*HZ/10; info->port.closing_wait = 30*HZ; init_waitqueue_head(&info->status_event_wait_q); init_waitqueue_head(&info->event_wait_q); spin_lock_init(&info->irq_spinlock); spin_lock_init(&info->netlock); memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); info->idle_mode = HDLC_TXIDLE_FLAGS; info->num_tx_dma_buffers = 1; info->num_tx_holding_buffers = 0; } return info; } /* end of mgsl_allocate_device()*/ static const struct tty_operations mgsl_ops = { .install = mgsl_install, .open = mgsl_open, .close = mgsl_close, .write = mgsl_write, .put_char = mgsl_put_char, .flush_chars = mgsl_flush_chars, .write_room = mgsl_write_room, .chars_in_buffer = mgsl_chars_in_buffer, .flush_buffer = mgsl_flush_buffer, .ioctl = mgsl_ioctl, .throttle = mgsl_throttle, .unthrottle = mgsl_unthrottle, .send_xchar = mgsl_send_xchar, .break_ctl = mgsl_break, .wait_until_sent = mgsl_wait_until_sent, .set_termios = mgsl_set_termios, .stop = mgsl_stop, .start = mgsl_start, .hangup = mgsl_hangup, .tiocmget = tiocmget, .tiocmset = tiocmset, .get_icount = msgl_get_icount, .proc_fops = &mgsl_proc_fops, }; /* * perform tty device initialization */ static int mgsl_init_tty(void) { int rc; serial_driver = alloc_tty_driver(128); if (!serial_driver) return -ENOMEM; serial_driver->driver_name = "synclink"; serial_driver->name = "ttySL"; serial_driver->major = ttymajor; serial_driver->minor_start = 64; serial_driver->type = TTY_DRIVER_TYPE_SERIAL; serial_driver->subtype = SERIAL_TYPE_NORMAL; serial_driver->init_termios = tty_std_termios; serial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; 
serial_driver->init_termios.c_ispeed = 9600; serial_driver->init_termios.c_ospeed = 9600; serial_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(serial_driver, &mgsl_ops); if ((rc = tty_register_driver(serial_driver)) < 0) { printk("%s(%d):Couldn't register serial driver\n", __FILE__,__LINE__); put_tty_driver(serial_driver); serial_driver = NULL; return rc; } printk("%s %s, tty major#%d\n", driver_name, driver_version, serial_driver->major); return 0; } /* enumerate user specified ISA adapters */ static void mgsl_enum_isa_devices(void) { struct mgsl_struct *info; int i; /* Check for user specified ISA devices */ for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){ if ( debug_level >= DEBUG_LEVEL_INFO ) printk("ISA device specified io=%04X,irq=%d,dma=%d\n", io[i], irq[i], dma[i] ); info = mgsl_allocate_device(); if ( !info ) { /* error allocating device instance data */ if ( debug_level >= DEBUG_LEVEL_ERROR ) printk( "can't allocate device instance data.\n"); continue; } /* Copy user configuration info to device instance data */ info->io_base = (unsigned int)io[i]; info->irq_level = (unsigned int)irq[i]; info->irq_level = irq_canonicalize(info->irq_level); info->dma_level = (unsigned int)dma[i]; info->bus_type = MGSL_BUS_TYPE_ISA; info->io_addr_size = 16; info->irq_flags = 0; mgsl_add_device( info ); } } static void synclink_cleanup(void) { int rc; struct mgsl_struct *info; struct mgsl_struct *tmp; printk("Unloading %s: %s\n", driver_name, driver_version); if (serial_driver) { if ((rc = tty_unregister_driver(serial_driver))) printk("%s(%d) failed to unregister tty driver err=%d\n", __FILE__,__LINE__,rc); put_tty_driver(serial_driver); } info = mgsl_device_list; while(info) { #if SYNCLINK_GENERIC_HDLC hdlcdev_exit(info); #endif mgsl_release_resources(info); tmp = info; info = info->next_device; tty_port_destroy(&tmp->port); kfree(tmp); } if (pci_registered) pci_unregister_driver(&synclink_pci_driver); } static int __init synclink_init(void) { int rc; if 
(break_on_load) { mgsl_get_text_ptr(); BREAKPOINT(); } printk("%s %s\n", driver_name, driver_version); mgsl_enum_isa_devices(); if ((rc = pci_register_driver(&synclink_pci_driver)) < 0) printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); else pci_registered = true; if ((rc = mgsl_init_tty()) < 0) goto error; return 0; error: synclink_cleanup(); return rc; } static void __exit synclink_exit(void) { synclink_cleanup(); } module_init(synclink_init); module_exit(synclink_exit); /* * usc_RTCmd() * * Issue a USC Receive/Transmit command to the * Channel Command/Address Register (CCAR). * * Notes: * * The command is encoded in the most significant 5 bits <15..11> * of the CCAR value. Bits <10..7> of the CCAR must be preserved * and Bits <6..0> must be written as zeros. * * Arguments: * * info pointer to device information structure * Cmd command mask (use symbolic macros) * * Return Value: * * None */ static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ) { /* output command to CCAR in bits <15..11> */ /* preserve bits <10..7>, bits <6..0> must be zero */ outw( Cmd + info->loopback_bits, info->io_base + CCAR ); /* Read to flush write to CCAR */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) inw( info->io_base + CCAR ); } /* end of usc_RTCmd() */ /* * usc_DmaCmd() * * Issue a DMA command to the DMA Command/Address Register (DCAR). 
* * Arguments: * * info pointer to device information structure * Cmd DMA command mask (usc_DmaCmd_XX Macros) * * Return Value: * * None */ static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ) { /* write command mask to DCAR */ outw( Cmd + info->mbre_bit, info->io_base ); /* Read to flush write to DCAR */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) inw( info->io_base ); } /* end of usc_DmaCmd() */ /* * usc_OutDmaReg() * * Write a 16-bit value to a USC DMA register * * Arguments: * * info pointer to device info structure * RegAddr register address (number) for write * RegValue 16-bit value to write to register * * Return Value: * * None * */ static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) { /* Note: The DCAR is located at the adapter base address */ /* Note: must preserve state of BIT8 in DCAR */ outw( RegAddr + info->mbre_bit, info->io_base ); outw( RegValue, info->io_base ); /* Read to flush write to DCAR */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) inw( info->io_base ); } /* end of usc_OutDmaReg() */ /* * usc_InDmaReg() * * Read a 16-bit value from a DMA register * * Arguments: * * info pointer to device info structure * RegAddr register address (number) to read from * * Return Value: * * The 16-bit value read from register * */ static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr ) { /* Note: The DCAR is located at the adapter base address */ /* Note: must preserve state of BIT8 in DCAR */ outw( RegAddr + info->mbre_bit, info->io_base ); return inw( info->io_base ); } /* end of usc_InDmaReg() */ /* * * usc_OutReg() * * Write a 16-bit value to a USC serial channel register * * Arguments: * * info pointer to device info structure * RegAddr register address (number) to write to * RegValue 16-bit value to write to register * * Return Value: * * None * */ static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) { outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); outw( RegValue, 
info->io_base + CCAR ); /* Read to flush write to CCAR */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) inw( info->io_base + CCAR ); } /* end of usc_OutReg() */ /* * usc_InReg() * * Reads a 16-bit value from a USC serial channel register * * Arguments: * * info pointer to device extension * RegAddr register address (number) to read from * * Return Value: * * 16-bit value read from register */ static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) { outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); return inw( info->io_base + CCAR ); } /* end of usc_InReg() */ /* usc_set_sdlc_mode() * * Set up the adapter for SDLC DMA communications. * * Arguments: info pointer to device instance data * Return Value: NONE */ static void usc_set_sdlc_mode( struct mgsl_struct *info ) { u16 RegValue; bool PreSL1660; /* * determine if the IUSC on the adapter is pre-SL1660. If * not, take advantage of the UnderWait feature of more * modern chips. If an underrun occurs and this bit is set, * the transmitter will idle the programmed idle pattern * until the driver has time to service the underrun. Otherwise, * the dma controller may get the cycles previously requested * and begin transmitting queued tx data. 
*/ usc_OutReg(info,TMCR,0x1f); RegValue=usc_InReg(info,TMDR); PreSL1660 = (RegValue == IUSC_PRE_SL1660); if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) { /* ** Channel Mode Register (CMR) ** ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun ** <13> 0 0 = Transmit Disabled (initially) ** <12> 0 1 = Consecutive Idles share common 0 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling ** <3..0> 0110 Receiver Mode = HDLC/SDLC ** ** 1000 1110 0000 0110 = 0x8e06 */ RegValue = 0x8e06; /*-------------------------------------------------- * ignore user options for UnderRun Actions and * preambles *--------------------------------------------------*/ } else { /* Channel mode Register (CMR) * * <15..14> 00 Tx Sub modes, Underrun Action * <13> 0 1 = Send Preamble before opening flag * <12> 0 1 = Consecutive Idles share common 0 * <11..8> 0110 Transmitter mode = HDLC/SDLC * <7..4> 0000 Rx Sub modes, addr/ctrl field handling * <3..0> 0110 Receiver mode = HDLC/SDLC * * 0000 0110 0000 0110 = 0x0606 */ if (info->params.mode == MGSL_MODE_RAW) { RegValue = 0x0001; /* Set Receive mode = external sync */ usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); /* * TxSubMode: * CMR <15> 0 Don't send CRC on Tx Underrun * CMR <14> x undefined * CMR <13> 0 Send preamble before openning sync * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength * * TxMode: * CMR <11-8) 0100 MonoSync * * 0x00 0100 xxxx xxxx 04xx */ RegValue |= 0x0400; } else { RegValue = 0x0606; if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 ) RegValue |= BIT14; else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) RegValue |= BIT15; else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) RegValue |= BIT15 + BIT14; } if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) RegValue |= BIT13; } if ( info->params.mode == MGSL_MODE_HDLC && (info->params.flags & 
HDLC_FLAG_SHARE_ZERO) ) RegValue |= BIT12; if ( info->params.addr_filter != 0xff ) { /* set up receive address filtering */ usc_OutReg( info, RSR, info->params.addr_filter ); RegValue |= BIT4; } usc_OutReg( info, CMR, RegValue ); info->cmr_value = RegValue; /* Receiver mode Register (RMR) * * <15..13> 000 encoding * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) * <9> 0 1 = Include Receive chars in CRC * <8> 1 1 = Use Abort/PE bit as abort indicator * <7..6> 00 Even parity * <5> 0 parity disabled * <4..2> 000 Receive Char Length = 8 bits * <1..0> 00 Disable Receiver * * 0000 0101 0000 0000 = 0x0500 */ RegValue = 0x0500; switch ( info->params.encoding ) { case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; } if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) RegValue |= BIT9; else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) RegValue |= ( BIT12 | BIT10 | BIT9 ); usc_OutReg( info, RMR, RegValue ); /* Set the Receive count Limit Register (RCLR) to 0xffff. */ /* When an opening flag of an SDLC frame is recognized the */ /* Receive Character count (RCC) is loaded with the value in */ /* RCLR. The RCC is decremented for each received byte. The */ /* value of RCC is stored after the closing flag of the frame */ /* allowing the frame size to be computed. */ usc_OutReg( info, RCLR, RCLRVALUE ); usc_RCmd( info, RCmd_SelectRicrdma_level ); /* Receive Interrupt Control Register (RICR) * * <15..8> ? 
RxFIFO DMA Request Level * <7> 0 Exited Hunt IA (Interrupt Arm) * <6> 0 Idle Received IA * <5> 0 Break/Abort IA * <4> 0 Rx Bound IA * <3> 1 Queued status reflects oldest 2 bytes in FIFO * <2> 0 Abort/PE IA * <1> 1 Rx Overrun IA * <0> 0 Select TC0 value for readback * * 0000 0000 0000 1000 = 0x000a */ /* Carry over the Exit Hunt and Idle Received bits */ /* in case they have been armed by usc_ArmEvents. */ RegValue = usc_InReg( info, RICR ) & 0xc0; if ( info->bus_type == MGSL_BUS_TYPE_PCI ) usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); else usc_OutReg( info, RICR, (u16)(0x140a | RegValue) ); /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); /* Transmit mode Register (TMR) * * <15..13> 000 encoding * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) * <9> 0 1 = Tx CRC Enabled * <8> 0 1 = Append CRC to end of transmit frame * <7..6> 00 Transmit parity Even * <5> 0 Transmit parity Disabled * <4..2> 000 Tx Char Length = 8 bits * <1..0> 00 Disable Transmitter * * 0000 0100 0000 0000 = 0x0400 */ RegValue = 0x0400; switch ( info->params.encoding ) { case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; } if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) RegValue |= BIT9 + BIT8; else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); usc_OutReg( info, TMR, RegValue ); usc_set_txidle( info ); usc_TCmd( info, TCmd_SelectTicrdma_level ); /* 
Transmit Interrupt Control Register (TICR) * * <15..8> ? Transmit FIFO DMA Level * <7> 0 Present IA (Interrupt Arm) * <6> 0 Idle Sent IA * <5> 1 Abort Sent IA * <4> 1 EOF/EOM Sent IA * <3> 0 CRC Sent IA * <2> 1 1 = Wait for SW Trigger to Start Frame * <1> 1 Tx Underrun IA * <0> 0 TC0 constant on read back * * 0000 0000 0011 0110 = 0x0036 */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) usc_OutReg( info, TICR, 0x0736 ); else usc_OutReg( info, TICR, 0x1436 ); usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); /* ** Transmit Command/Status Register (TCSR) ** ** <15..12> 0000 TCmd ** <11> 0/1 UnderWait ** <10..08> 000 TxIdle ** <7> x PreSent ** <6> x IdleSent ** <5> x AbortSent ** <4> x EOF/EOM Sent ** <3> x CRC Sent ** <2> x All Sent ** <1> x TxUnder ** <0> x TxEmpty ** ** 0000 0000 0000 0000 = 0x0000 */ info->tcsr_value = 0; if ( !PreSL1660 ) info->tcsr_value |= TCSR_UNDERWAIT; usc_OutReg( info, TCSR, info->tcsr_value ); /* Clock mode Control Register (CMCR) * * <15..14> 00 counter 1 Source = Disabled * <13..12> 00 counter 0 Source = Disabled * <11..10> 11 BRG1 Input is TxC Pin * <9..8> 11 BRG0 Input is TxC Pin * <7..6> 01 DPLL Input is BRG1 Output * <5..3> XXX TxCLK comes from Port 0 * <2..0> XXX RxCLK comes from Port 1 * * 0000 1111 0111 0111 = 0x0f77 */ RegValue = 0x0f40; if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) RegValue |= 0x0003; /* RxCLK from DPLL */ else if ( info->params.flags & HDLC_FLAG_RXC_BRG ) RegValue |= 0x0004; /* RxCLK from BRG0 */ else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) RegValue |= 0x0006; /* RxCLK from TXC Input */ else RegValue |= 0x0007; /* RxCLK from Port1 */ if ( info->params.flags & HDLC_FLAG_TXC_DPLL ) RegValue |= 0x0018; /* TxCLK from DPLL */ else if ( info->params.flags & HDLC_FLAG_TXC_BRG ) RegValue |= 0x0020; /* TxCLK from BRG0 */ else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN) RegValue |= 0x0038; /* RxCLK from TXC Input */ else RegValue |= 0x0030; /* TxCLK from Port0 */ 
usc_OutReg( info, CMCR, RegValue ); /* Hardware Configuration Register (HCR) * * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div * <12> 0 CVOK:0=report code violation in biphase * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level * <7..6> 00 reserved * <5> 0 BRG1 mode:0=continuous,1=single cycle * <4> X BRG1 Enable * <3..2> 00 reserved * <1> 0 BRG0 mode:0=continuous,1=single cycle * <0> 0 BRG0 Enable */ RegValue = 0x0000; if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) { u32 XtalSpeed; u32 DpllDivisor; u16 Tc; /* DPLL is enabled. Use BRG1 to provide continuous reference clock */ /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) XtalSpeed = 11059200; else XtalSpeed = 14745600; if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { DpllDivisor = 16; RegValue |= BIT10; } else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { DpllDivisor = 8; RegValue |= BIT11; } else DpllDivisor = 32; /* Tc = (Xtal/Speed) - 1 */ /* If twice the remainder of (Xtal/Speed) is greater than Speed */ /* then rounding up gives a more precise time constant. Instead */ /* of rounding up and then subtracting 1 we just don't subtract */ /* the one in this case. */ /*-------------------------------------------------- * ejz: for DPLL mode, application should use the * same clock speed as the partner system, even * though clocking is derived from the input RxData. 
* In case the user uses a 0 for the clock speed, * default to 0xffffffff and don't try to divide by * zero *--------------------------------------------------*/ if ( info->params.clock_speed ) { Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) / info->params.clock_speed) ) Tc--; } else Tc = -1; /* Write 16-bit Time Constant for BRG1 */ usc_OutReg( info, TC1R, Tc ); RegValue |= BIT4; /* enable BRG1 */ switch ( info->params.encoding ) { case HDLC_ENCODING_NRZ: case HDLC_ENCODING_NRZB: case HDLC_ENCODING_NRZI_MARK: case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; case HDLC_ENCODING_BIPHASE_MARK: case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; case HDLC_ENCODING_BIPHASE_LEVEL: case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break; } } usc_OutReg( info, HCR, RegValue ); /* Channel Control/status Register (CCSR) * * <15> X RCC FIFO Overflow status (RO) * <14> X RCC FIFO Not Empty status (RO) * <13> 0 1 = Clear RCC FIFO (WO) * <12> X DPLL Sync (RW) * <11> X DPLL 2 Missed Clocks status (RO) * <10> X DPLL 1 Missed Clock status (RO) * <9..8> 00 DPLL Resync on rising and falling edges (RW) * <7> X SDLC Loop On status (RO) * <6> X SDLC Loop Send status (RO) * <5> 1 Bypass counters for TxClk and RxClk (RW) * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) * <1..0> 00 reserved * * 0000 0000 0010 0000 = 0x0020 */ usc_OutReg( info, CCSR, 0x1020 ); if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) { usc_OutReg( info, SICR, (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); } /* enable Master Interrupt Enable bit (MIE) */ usc_EnableMasterIrqBit( info ); usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA + TRANSMIT_STATUS + TRANSMIT_DATA + MISC); /* arm RCC underflow interrupt */ usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); usc_EnableInterrupts(info, MISC); info->mbre_bit = 0; outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 
usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ info->mbre_bit = BIT8; outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ if (info->bus_type == MGSL_BUS_TYPE_ISA) { /* Enable DMAEN (Port 7, Bit 14) */ /* This connects the DMA request signal to the ISA bus */ usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); } /* DMA Control Register (DCR) * * <15..14> 10 Priority mode = Alternating Tx/Rx * 01 Rx has priority * 00 Tx has priority * * <13> 1 Enable Priority Preempt per DCR<15..14> * (WARNING DCR<11..10> must be 00 when this is 1) * 0 Choose activate channel per DCR<11..10> * * <12> 0 Little Endian for Array/List * <11..10> 00 Both Channels can use each bus grant * <9..6> 0000 reserved * <5> 0 7 CLK - Minimum Bus Re-request Interval * <4> 0 1 = drive D/C and S/D pins * <3> 1 1 = Add one wait state to all DMA cycles. * <2> 0 1 = Strobe /UAS on every transfer. * <1..0> 11 Addr incrementing only affects LS24 bits * * 0110 0000 0000 1011 = 0x600b */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { /* PCI adapter does not need DMA wait state */ usc_OutDmaReg( info, DCR, 0xa00b ); } else usc_OutDmaReg( info, DCR, 0x800b ); /* Receive DMA mode Register (RDMR) * * <15..14> 11 DMA mode = Linked List Buffer mode * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry * <12> 1 Clear count of List Entry after fetching * <11..10> 00 Address mode = Increment * <9> 1 Terminate Buffer on RxBound * <8> 0 Bus Width = 16bits * <7..0> ? status Bits (write as 0s) * * 1111 0010 0000 0000 = 0xf200 */ usc_OutDmaReg( info, RDMR, 0xf200 ); /* Transmit DMA mode Register (TDMR) * * <15..14> 11 DMA mode = Linked List Buffer mode * <13> 1 TCBinA/L = fetch Tx Control Block from List entry * <12> 1 Clear count of List Entry after fetching * <11..10> 00 Address mode = Increment * <9> 1 Terminate Buffer on end of frame * <8> 0 Bus Width = 16bits * <7..0> ? 
status Bits (Read Only so write as 0) * * 1111 0010 0000 0000 = 0xf200 */ usc_OutDmaReg( info, TDMR, 0xf200 ); /* DMA Interrupt Control Register (DICR) * * <15> 1 DMA Interrupt Enable * <14> 0 1 = Disable IEO from USC * <13> 0 1 = Don't provide vector during IntAck * <12> 1 1 = Include status in Vector * <10..2> 0 reserved, Must be 0s * <1> 0 1 = Rx DMA Interrupt Enabled * <0> 0 1 = Tx DMA Interrupt Enabled * * 1001 0000 0000 0000 = 0x9000 */ usc_OutDmaReg( info, DICR, 0x9000 ); usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ /* Channel Control Register (CCR) * * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) * <13> 0 Trigger Tx on SW Command Disabled * <12> 0 Flag Preamble Disabled * <11..10> 00 Preamble Length * <9..8> 00 Preamble Pattern * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) * <5> 0 Trigger Rx on SW Command Disabled * <4..0> 0 reserved * * 1000 0000 1000 0000 = 0x8080 */ RegValue = 0x8080; switch ( info->params.preamble_length ) { case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break; } switch ( info->params.preamble ) { case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break; case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break; } usc_OutReg( info, CCR, RegValue ); /* * Burst/Dwell Control Register * * <15..8> 0x20 Maximum number of transfers per bus grant * <7..0> 0x00 Maximum number of clock cycles per bus grant */ if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { /* don't limit bus occupancy on PCI adapter */ usc_OutDmaReg( info, BDCR, 0x0000 ); } else usc_OutDmaReg( info, BDCR, 0x2000 ); usc_stop_transmitter(info); 
usc_stop_receiver(info); } /* end of usc_set_sdlc_mode() */ /* usc_enable_loopback() * * Set the 16C32 for internal loopback mode. * The TxCLK and RxCLK signals are generated from the BRG0 and * the TxD is looped back to the RxD internally. * * Arguments: info pointer to device instance data * enable 1 = enable loopback, 0 = disable * Return Value: None */ static void usc_enable_loopback(struct mgsl_struct *info, int enable) { if (enable) { /* blank external TXD output */ usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6)); /* Clock mode Control Register (CMCR) * * <15..14> 00 counter 1 Disabled * <13..12> 00 counter 0 Disabled * <11..10> 11 BRG1 Input is TxC Pin * <9..8> 11 BRG0 Input is TxC Pin * <7..6> 01 DPLL Input is BRG1 Output * <5..3> 100 TxCLK comes from BRG0 * <2..0> 100 RxCLK comes from BRG0 * * 0000 1111 0110 0100 = 0x0f64 */ usc_OutReg( info, CMCR, 0x0f64 ); /* Write 16-bit Time Constant for BRG0 */ /* use clock speed if available, otherwise use 8 for diagnostics */ if (info->params.clock_speed) { if (info->bus_type == MGSL_BUS_TYPE_PCI) usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1)); else usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1)); } else usc_OutReg(info, TC0R, (u16)8); /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0 mode = Continuous Set Bit 0 to enable BRG0. 
*/ usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004)); /* set Internal Data loopback mode */ info->loopback_bits = 0x300; outw( 0x0300, info->io_base + CCAR ); } else { /* enable external TXD output */ usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6)); /* clear Internal Data loopback mode */ info->loopback_bits = 0; outw( 0,info->io_base + CCAR ); } } /* end of usc_enable_loopback() */ /* usc_enable_aux_clock() * * Enabled the AUX clock output at the specified frequency. * * Arguments: * * info pointer to device extension * data_rate data rate of clock in bits per second * A data rate of 0 disables the AUX clock. * * Return Value: None */ static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate ) { u32 XtalSpeed; u16 Tc; if ( data_rate ) { if ( info->bus_type == MGSL_BUS_TYPE_PCI ) XtalSpeed = 11059200; else XtalSpeed = 14745600; /* Tc = (Xtal/Speed) - 1 */ /* If twice the remainder of (Xtal/Speed) is greater than Speed */ /* then rounding up gives a more precise time constant. Instead */ /* of rounding up and then subtracting 1 we just don't subtract */ /* the one in this case. */ Tc = (u16)(XtalSpeed/data_rate); if ( !(((XtalSpeed % data_rate) * 2) / data_rate) ) Tc--; /* Write 16-bit Time Constant for BRG0 */ usc_OutReg( info, TC0R, Tc ); /* * Hardware Configuration Register (HCR) * Clear Bit 1, BRG0 mode = Continuous * Set Bit 0 to enable BRG0. 
*/ usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); } else { /* data rate == 0 so turn off BRG0 */ usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); } } /* end of usc_enable_aux_clock() */ /* * * usc_process_rxoverrun_sync() * * This function processes a receive overrun by resetting the * receive DMA buffers and issuing a Purge Rx FIFO command * to allow the receiver to continue receiving. * * Arguments: * * info pointer to device extension * * Return Value: None */ static void usc_process_rxoverrun_sync( struct mgsl_struct *info ) { int start_index; int end_index; int frame_start_index; bool start_of_frame_found = false; bool end_of_frame_found = false; bool reprogram_dma = false; DMABUFFERENTRY *buffer_list = info->rx_buffer_list; u32 phys_addr; usc_DmaCmd( info, DmaCmd_PauseRxChannel ); usc_RCmd( info, RCmd_EnterHuntmode ); usc_RTCmd( info, RTCmd_PurgeRxFifo ); /* CurrentRxBuffer points to the 1st buffer of the next */ /* possibly available receive frame. */ frame_start_index = start_index = end_index = info->current_rx_buffer; /* Search for an unfinished string of buffers. This means */ /* that a receive frame started (at least one buffer with */ /* count set to zero) but there is no terminiting buffer */ /* (status set to non-zero). */ while( !buffer_list[end_index].count ) { /* Count field has been reset to zero by 16C32. */ /* This buffer is currently in use. */ if ( !start_of_frame_found ) { start_of_frame_found = true; frame_start_index = end_index; end_of_frame_found = false; } if ( buffer_list[end_index].status ) { /* Status field has been set by 16C32. */ /* This is the last buffer of a received frame. */ /* We want to leave the buffers for this frame intact. */ /* Move on to next possible frame. 
*/ start_of_frame_found = false; end_of_frame_found = true; } /* advance to next buffer entry in linked list */ end_index++; if ( end_index == info->rx_buffer_count ) end_index = 0; if ( start_index == end_index ) { /* The entire list has been searched with all Counts == 0 and */ /* all Status == 0. The receive buffers are */ /* completely screwed, reset all receive buffers! */ mgsl_reset_rx_dma_buffers( info ); frame_start_index = 0; start_of_frame_found = false; reprogram_dma = true; break; } } if ( start_of_frame_found && !end_of_frame_found ) { /* There is an unfinished string of receive DMA buffers */ /* as a result of the receiver overrun. */ /* Reset the buffers for the unfinished frame */ /* and reprogram the receive DMA controller to start */ /* at the 1st buffer of unfinished frame. */ start_index = frame_start_index; do { *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE; /* Adjust index for wrap around. */ if ( start_index == info->rx_buffer_count ) start_index = 0; } while( start_index != end_index ); reprogram_dma = true; } if ( reprogram_dma ) { usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS); usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS); usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); /* This empties the receive FIFO and loads the RCC with RCLR */ usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); /* program 16C32 with physical address of 1st DMA buffer entry */ phys_addr = info->rx_buffer_list[frame_start_index].phys_entry; usc_OutDmaReg( info, NRARL, (u16)phys_addr ); usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); usc_EnableInterrupts( info, RECEIVE_STATUS ); /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ /* 2. 
Enable Receive DMA Interrupts (BIT1 of DICR) */ usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 ); usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); usc_DmaCmd( info, DmaCmd_InitRxChannel ); if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) usc_EnableReceiver(info,ENABLE_AUTO_DCD); else usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); } else { /* This empties the receive FIFO and loads the RCC with RCLR */ usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); usc_RTCmd( info, RTCmd_PurgeRxFifo ); } } /* end of usc_process_rxoverrun_sync() */ /* usc_stop_receiver() * * Disable USC receiver * * Arguments: info pointer to device instance data * Return Value: None */ static void usc_stop_receiver( struct mgsl_struct *info ) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):usc_stop_receiver(%s)\n", __FILE__,__LINE__, info->device_name ); /* Disable receive DMA channel. */ /* This also disables receive DMA channel interrupts */ usc_DmaCmd( info, DmaCmd_ResetRxChannel ); usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS ); usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); /* This empties the receive FIFO and loads the RCC with RCLR */ usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); usc_RTCmd( info, RTCmd_PurgeRxFifo ); info->rx_enabled = false; info->rx_overflow = false; info->rx_rcc_underrun = false; } /* end of stop_receiver() */ /* usc_start_receiver() * * Enable the USC receiver * * Arguments: info pointer to device instance data * Return Value: None */ static void usc_start_receiver( struct mgsl_struct *info ) { u32 phys_addr; if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):usc_start_receiver(%s)\n", __FILE__,__LINE__, info->device_name ); mgsl_reset_rx_dma_buffers( info ); usc_stop_receiver( info ); usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); usc_RTCmd( info, RTCmd_PurgeRxFifo ); if ( 
info->params.mode == MGSL_MODE_HDLC || info->params.mode == MGSL_MODE_RAW ) { /* DMA mode Transfers */ /* Program the DMA controller. */ /* Enable the DMA controller end of buffer interrupt. */ /* program 16C32 with physical address of 1st DMA buffer entry */ phys_addr = info->rx_buffer_list[0].phys_entry; usc_OutDmaReg( info, NRARL, (u16)phys_addr ); usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); usc_EnableInterrupts( info, RECEIVE_STATUS ); /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */ usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 ); usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); usc_DmaCmd( info, DmaCmd_InitRxChannel ); if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) usc_EnableReceiver(info,ENABLE_AUTO_DCD); else usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); } else { usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); usc_EnableInterrupts(info, RECEIVE_DATA); usc_RTCmd( info, RTCmd_PurgeRxFifo ); usc_RCmd( info, RCmd_EnterHuntmode ); usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); } usc_OutReg( info, CCSR, 0x1020 ); info->rx_enabled = true; } /* end of usc_start_receiver() */ /* usc_start_transmitter() * * Enable the USC transmitter and send a transmit frame if * one is loaded in the DMA buffers. * * Arguments: info pointer to device instance data * Return Value: None */ static void usc_start_transmitter( struct mgsl_struct *info ) { u32 phys_addr; unsigned int FrameSize; if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):usc_start_transmitter(%s)\n", __FILE__,__LINE__, info->device_name ); if ( info->xmit_cnt ) { /* If auto RTS enabled and RTS is inactive, then assert */ /* RTS and set a flag indicating that the driver should */ /* negate RTS when the transmission completes. 
 */

        info->drop_rts_on_tx_done = false;

        if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
            usc_get_serial_signals( info );
            if ( !(info->serial_signals & SerialSignal_RTS) ) {
                info->serial_signals |= SerialSignal_RTS;
                usc_set_serial_signals( info );
                info->drop_rts_on_tx_done = true;
            }
        }

        if ( info->params.mode == MGSL_MODE_ASYNC ) {
            /* async: feed the FIFO directly, interrupt-driven */
            if ( !info->tx_active ) {
                usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
                usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
                usc_EnableInterrupts(info, TRANSMIT_DATA);
                usc_load_txfifo(info);
            }
        } else {
            /* Disable transmit DMA controller while programming. */
            usc_DmaCmd( info, DmaCmd_ResetTxChannel );

            /* Transmit DMA buffer is loaded, so program USC */
            /* to send the frame contained in the buffers.   */

            FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;

            /* if operating in Raw sync mode, reset the rcc component
             * of the tx dma buffer entry, otherwise, the serial controller
             * will send a closing sync char after this count.
             */
            if ( info->params.mode == MGSL_MODE_RAW )
                info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;

            /* Program the Transmit Character Length Register (TCLR) */
            /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
            usc_OutReg( info, TCLR, (u16)FrameSize );

            usc_RTCmd( info, RTCmd_PurgeTxFifo );

            /* Program the address of the 1st DMA Buffer Entry in linked list */
            phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
            usc_OutDmaReg( info, NTARL, (u16)phys_addr );
            usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );

            usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
            usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
            usc_EnableInterrupts( info, TRANSMIT_STATUS );

            if ( info->params.mode == MGSL_MODE_RAW &&
                    info->num_tx_dma_buffers > 1 ) {
               /* When running external sync mode, attempt to 'stream' transmit  */
               /* by filling tx dma buffers as they become available. To do this */
               /* we need to enable Tx DMA EOB Status interrupts :               */
               /*                                                                */
               /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
               /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
               /* NOTE(review): BIT3 is also set in TDIAR here — confirm
                * intent against the Z16C32 TDIAR bit definitions. */

               usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
               usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
            }

            /* Initialize Transmit DMA Channel */
            usc_DmaCmd( info, DmaCmd_InitTxChannel );

            usc_TCmd( info, TCmd_SendFrame );

            /* watchdog: abort the frame if not done within 5 seconds */
            mod_timer(&info->tx_timer, jiffies +
                    msecs_to_jiffies(5000));
        }
        info->tx_active = true;
    }

    if ( !info->tx_enabled ) {
        info->tx_enabled = true;
        if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
            usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
        else
            usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
    }

}    /* end of usc_start_transmitter() */

/* usc_stop_transmitter()
 *
 *    Stops the transmitter and DMA
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void usc_stop_transmitter( struct mgsl_struct *info )
{
    if (debug_level >= DEBUG_LEVEL_ISR)
        printk("%s(%d):usc_stop_transmitter(%s)\n",
             __FILE__,__LINE__, info->device_name );

    /* cancel the tx watchdog started by usc_start_transmitter() */
    del_timer(&info->tx_timer);

    usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
    usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
    usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );

    usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
    usc_DmaCmd( info, DmaCmd_ResetTxChannel );
    usc_RTCmd( info, RTCmd_PurgeTxFifo );

    info->tx_enabled = false;
    info->tx_active = false;

}    /* end of usc_stop_transmitter() */

/* usc_load_txfifo()
 *
 *    Fill the transmit FIFO until the FIFO is full or
 *    there is no more data to load.
 *
 * Arguments:        info    pointer to device extension (instance data)
 * Return Value:     None
 */
static void usc_load_txfifo( struct mgsl_struct *info )
{
    int Fifocount;
    u8 TwoBytes[2];

    if ( !info->xmit_cnt && !info->x_char )
        return;

    /* Select transmit FIFO status readback in TICR */
    usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

    /* load the Transmit FIFO until FIFOs full or all data sent */

    while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
        /* there is more space in the transmit FIFO and */
        /* there is more data in transmit buffer */

        if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
            /* write a 16-bit word from transmit buffer to 16C32 */

            TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
            /* xmit_tail wraps at SERIAL_XMIT_SIZE (power of two ring) */
            info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
            TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
            info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);

            /* NOTE(review): type-puns the 2-byte array as u16 for a single
             * 16-bit port write; depends on little-endian byte ordering
             * selected in usc_reset(). */
            outw( *((u16 *)TwoBytes), info->io_base + DATAREG);

            info->xmit_cnt -= 2;
            info->icount.tx += 2;
        } else {
            /* only 1 byte left to transmit or 1 FIFO slot left */

            /* select low-byte-only access to TDR via CCAR */
            outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
                info->io_base + CCAR );

            if (info->x_char) {
                /* transmit pending high priority char */
                outw( info->x_char,info->io_base + CCAR );
                info->x_char = 0;
            } else {
                outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
                info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
                info->xmit_cnt--;
            }
            info->icount.tx++;
        }
    }

}    /* end of usc_load_txfifo() */

/* usc_reset()
 *
 *    Reset the adapter to a known state and prepare it for further use.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void usc_reset( struct mgsl_struct *info )
{
    if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
        int i;
        u32 readval;

        /* Set BIT30 of Misc Control Register */
        /* (Local Control Register 0x50) to force reset of USC.
 */

        volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
        u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

        info->misc_ctrl_value |= BIT30;
        *MiscCtrl = info->misc_ctrl_value;

        /*
         * Force at least 170ns delay before clearing
         * reset bit. Each read from LCR takes at least
         * 30ns so 10 times for 300ns to be safe.
         * (readval itself is intentionally unused; the volatile
         * reads exist only to burn bus cycles.)
         */
        for(i=0;i<10;i++)
            readval = *MiscCtrl;

        info->misc_ctrl_value &= ~BIT30;
        *MiscCtrl = info->misc_ctrl_value;

        /* program local-bus read/write strobe timing */
        *LCR0BRDR = BUS_DESCRIPTOR(
            1,        // Write Strobe Hold (0-3)
            2,        // Write Strobe Delay (0-3)
            2,        // Read Strobe Delay  (0-3)
            0,        // NWDD (Write data-data) (0-3)
            4,        // NWAD (Write Addr-data) (0-31)
            0,        // NXDA (Read/Write Data-Addr) (0-3)
            0,        // NRDD (Read Data-Data) (0-3)
            5         // NRAD (Read Addr-Data) (0-31)
            );
    } else {
        /* do HW reset */
        outb( 0,info->io_base + 8 );
    }

    info->mbre_bit = 0;
    info->loopback_bits = 0;
    info->usc_idle_mode = 0;

    /*
     * Program the Bus Configuration Register (BCR)
     *
     * <15>       0    Don't use separate address
     * <14..6>    0    reserved
     * <5..4>     00   IAckmode = Default, don't care
     * <3>        1    Bus Request Totem Pole output
     * <2>        1    Use 16 Bit data bus
     * <1>        0    IRQ Totem Pole output
     * <0>        0    Don't Shift Right Addr
     *
     * 0000 0000 0000 1100 = 0x000c
     *
     * By writing to io_base + SDPIN the Wait/Ack pin is
     * programmed to work as a Wait pin.
     */

    outw( 0x000c,info->io_base + SDPIN );


    outw( 0,info->io_base );
    outw( 0,info->io_base + CCAR );

    /* select little endian byte ordering */
    usc_RTCmd( info, RTCmd_SelectLittleEndian );


    /* Port Control Register (PCR)
     *
     * <15..14>   11   Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
     * <13..12>   11   Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
     * <11..10>   00   Port 5 is Input (No Connect, Don't Care)
     * <9..8>     00   Port 4 is Input (No Connect, Don't Care)
     * <7..6>     11   Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
     * <5..4>     11   Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
     * <3..2>     01   Port 1 is Input (Dedicated RxC)
     * <1..0>     01   Port 0 is Input (Dedicated TxC)
     *
     *    1111 0000 1111 0101 = 0xf0f5
     */

    usc_OutReg( info, PCR, 0xf0f5 );


    /*
     * Input/Output Control Register
     *
     * <15..14>   00   CTS is active low input
     * <13..12>   00   DCD is active low input
     * <11..10>   00   TxREQ pin is input (DSR)
     * <9..8>     00   RxREQ pin is input (RI)
     * <7..6>     00   TxD is output (Transmit Data)
     * <5..3>     000  TxC Pin in Input (14.7456MHz Clock)
     * <2..0>     100  RxC is Output (drive with BRG0)
     *
     * 0000 0000 0000 0100 = 0x0004
     */

    usc_OutReg( info, IOCR, 0x0004 );

}    /* end of usc_reset() */

/* usc_set_async_mode()
 *
 *    Program adapter for asynchronous communications.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void usc_set_async_mode( struct mgsl_struct *info )
{
    u16 RegValue;

    /* disable interrupts while programming USC */
    usc_DisableMasterIrqBit( info );

    outw( 0, info->io_base );                      /* clear Master Bus Enable (DCAR) */
    usc_DmaCmd( info, DmaCmd_ResetAllChannels );   /* disable both DMA channels */

    usc_loopback_frame( info );

    /* Channel mode Register (CMR)
     *
     * <15..14>   00    Tx Sub modes, 00 = 1 Stop Bit
     * <13..12>   00    00 = 16X Clock
     * <11..8>    0000  Transmitter mode = Asynchronous
     * <7..6>     00    reserved?
 * <5..4>     00    Rx Sub modes, 00 = 16X Clock
     * <3..0>     0000  Receiver mode = Asynchronous
     *
     * 0000 0000 0000 0000 = 0x0
     */
    RegValue = 0;
    if ( info->params.stop_bits != 1 )
        RegValue |= BIT14;
    usc_OutReg( info, CMR, RegValue );


    /* Receiver mode Register (RMR)
     *
     * <15..13>   000    encoding = None
     * <12..08>   00000  reserved (Sync Only)
     * <7..6>     00     Even parity
     * <5>        0      parity disabled
     * <4..2>     000    Receive Char Length = 8 bits
     * <1..0>     00     Disable Receiver
     *
     * 0000 0000 0000 0000 = 0x0
     */
    RegValue = 0;

    if ( info->params.data_bits != 8 )
        RegValue |= BIT4+BIT3+BIT2;

    if ( info->params.parity != ASYNC_PARITY_NONE ) {
        RegValue |= BIT5;
        if ( info->params.parity != ASYNC_PARITY_ODD )
            RegValue |= BIT6;
    }

    usc_OutReg( info, RMR, RegValue );


    /* Set IRQ trigger level */

    usc_RCmd( info, RCmd_SelectRicrIntLevel );


    /* Receive Interrupt Control Register (RICR)
     *
     * <15..8>    ?    RxFIFO IRQ Request Level
     *
     * Note: For async mode the receive FIFO level must be set
     * to 0 to avoid the situation where the FIFO contains fewer bytes
     * than the trigger level and no more data is expected.
     *
     * <7>        0    Exited Hunt IA (Interrupt Arm)
     * <6>        0    Idle Received IA
     * <5>        0    Break/Abort IA
     * <4>        0    Rx Bound IA
     * <3>        0    Queued status reflects oldest byte in FIFO
     * <2>        0    Abort/PE IA
     * <1>        0    Rx Overrun IA
     * <0>        0    Select TC0 value for readback
     *
     *    0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
     */

    usc_OutReg( info, RICR, 0x0000 );

    usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
    usc_ClearIrqPendingBits( info, RECEIVE_STATUS );


    /* Transmit mode Register (TMR)
     *
     * <15..13>   000    encoding = None
     * <12..08>   00000  reserved (Sync Only)
     * <7..6>     00     Transmit parity Even
     * <5>        0      Transmit parity Disabled
     * <4..2>     000    Tx Char Length = 8 bits
     * <1..0>     00     Disable Transmitter
     *
     *    0000 0000 0000 0000 = 0x0
     */

    RegValue = 0;

    if ( info->params.data_bits != 8 )
        RegValue |= BIT4+BIT3+BIT2;

    if ( info->params.parity != ASYNC_PARITY_NONE ) {
        RegValue |= BIT5;
        if ( info->params.parity != ASYNC_PARITY_ODD )
            RegValue |= BIT6;
    }

    usc_OutReg( info, TMR, RegValue );

    usc_set_txidle( info );


    /* Set IRQ trigger level */

    usc_TCmd( info, TCmd_SelectTicrIntLevel );


    /* Transmit Interrupt Control Register (TICR)
     *
     * <15..8>    ?    Transmit FIFO IRQ Level
     * <7>        0    Present IA (Interrupt Arm)
     * <6>        1    Idle Sent IA
     * <5>        0    Abort Sent IA
     * <4>        0    EOF/EOM Sent IA
     * <3>        0    CRC Sent IA
     * <2>        0    1 = Wait for SW Trigger to Start Frame
     * <1>        0    Tx Underrun IA
     * <0>        0    TC0 constant on read back
     *
     *    0000 0000 0100 0000 = 0x0040
     *    (written as 0x1f40: low byte 0x40 plus FIFO IRQ level 0x1f in MSB)
     */

    usc_OutReg( info, TICR, 0x1f40 );

    usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
    usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );

    usc_enable_async_clock( info, info->params.data_rate );

    /* Channel Control/status Register (CCSR)
     *
     * <15>       X    RCC FIFO Overflow status (RO)
     * <14>       X    RCC FIFO Not Empty status (RO)
     * <13>       0    1 = Clear RCC FIFO (WO)
     * <12>       X    DPLL in Sync status (RO)
     * <11>       X    DPLL 2 Missed Clocks status (RO)
     * <10>       X    DPLL 1 Missed Clock status (RO)
     * <9..8>     00   DPLL Resync on rising and falling edges (RW)
     * <7>        X    SDLC Loop On status (RO)
     * <6>        X    SDLC Loop Send status (RO)
     * <5>        1    Bypass counters for TxClk and RxClk (RW)
     * <4..2>     000  Last Char of SDLC frame has 8 bits (RW)
     * <1..0>     00   reserved
     *
     * 0000 0000 0010 0000 = 0x0020
     */

    usc_OutReg( info, CCSR, 0x0020 );

    usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
                  RECEIVE_DATA + RECEIVE_STATUS );

    usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
                RECEIVE_DATA + RECEIVE_STATUS );

    usc_EnableMasterIrqBit( info );

    if (info->bus_type == MGSL_BUS_TYPE_ISA) {
        /* Enable INTEN (Port 6, Bit12) */
        /* This connects the IRQ request signal to the ISA bus */
        usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
    }

    if (info->params.loopback) {
        info->loopback_bits = 0x300;
        outw(0x0300, info->io_base + CCAR);
    }

}    /* end of usc_set_async_mode() */

/* usc_loopback_frame()
 *
 *    Loop back a small (2 byte) dummy SDLC frame.
 *    Interrupts and DMA are NOT used. The purpose of this is to
 *    clear any 'stale' status info left over from running in async mode.
 *
 *    The 16C32 shows the strange behaviour of marking the 1st
 *    received SDLC frame with a CRC error even when there is no
 *    CRC error.
To get around this a small dummy frame of 2 bytes
 *    is looped back when switching from async to sync mode.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void usc_loopback_frame( struct mgsl_struct *info )
{
    int i;
    unsigned long oldmode = info->params.mode;

    /* temporarily force HDLC mode for the dummy frame */
    info->params.mode = MGSL_MODE_HDLC;

    usc_DisableMasterIrqBit( info );

    usc_set_sdlc_mode( info );
    usc_enable_loopback( info, 1 );

    /* Write 16-bit Time Constant for BRG0 */
    usc_OutReg( info, TC0R, 0 );

    /* Channel Control Register (CCR)
     *
     * <15..14>   00    Don't use 32-bit Tx Control Blocks (TCBs)
     * <13>       0     Trigger Tx on SW Command Disabled
     * <12>       0     Flag Preamble Disabled
     * <11..10>   00    Preamble Length = 8-Bits
     * <9..8>     01    Preamble Pattern = flags
     * <7..6>     10    Don't use 32-bit Rx status Blocks (RSBs)
     * <5>        0     Trigger Rx on SW Command Disabled
     * <4..0>     0     reserved
     *
     *    0000 0001 0000 0000 = 0x0100
     */

    usc_OutReg( info, CCR, 0x0100 );

    /* SETUP RECEIVER */
    usc_RTCmd( info, RTCmd_PurgeRxFifo );
    usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

    /* SETUP TRANSMITTER */
    /* Program the Transmit Character Length Register (TCLR) */
    /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
    usc_OutReg( info, TCLR, 2 );
    usc_RTCmd( info, RTCmd_PurgeTxFifo );

    /* unlatch Tx status bits, and start transmit channel. */
    usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
    outw(0,info->io_base + DATAREG);

    /* ENABLE TRANSMITTER */
    usc_TCmd( info, TCmd_SendFrame );
    usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

    /* WAIT FOR RECEIVE COMPLETE
     * bounded poll (1000 iterations) of RCSR for any end-of-frame /
     * error status bit; no timeout error is reported on expiry. */
    for (i=0 ; i<1000 ; i++)
        if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
            break;

    /* clear Internal Data loopback mode */
    usc_enable_loopback(info, 0);

    usc_EnableMasterIrqBit(info);

    info->params.mode = oldmode;

}    /* end of usc_loopback_frame() */

/* usc_set_sync_mode()    Programs the USC for SDLC communications.
 *
 * Arguments:        info    pointer to adapter info structure
 * Return Value:     None
 */
static void usc_set_sync_mode( struct mgsl_struct *info )
{
    usc_loopback_frame( info );
    usc_set_sdlc_mode( info );

    if (info->bus_type == MGSL_BUS_TYPE_ISA) {
        /* Enable INTEN (Port 6, Bit12) */
        /* This connects the IRQ request signal to the ISA bus */
        usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
    }

    usc_enable_aux_clock(info, info->params.clock_speed);

    if (info->params.loopback)
        usc_enable_loopback(info,1);

}    /* end of mgsl_set_sync_mode() */

/* usc_set_txidle()    Set the HDLC idle mode for the transmitter.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void usc_set_txidle( struct mgsl_struct *info )
{
    u16 usc_idle_mode = IDLEMODE_FLAGS;

    /* Map API idle mode to USC register bits */

    switch( info->idle_mode ){
    case HDLC_TXIDLE_FLAGS:            usc_idle_mode = IDLEMODE_FLAGS; break;
    case HDLC_TXIDLE_ALT_ZEROS_ONES:   usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
    case HDLC_TXIDLE_ZEROS:            usc_idle_mode = IDLEMODE_ZERO; break;
    case HDLC_TXIDLE_ONES:             usc_idle_mode = IDLEMODE_ONE; break;
    case HDLC_TXIDLE_ALT_MARK_SPACE:   usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
    case HDLC_TXIDLE_SPACE:            usc_idle_mode = IDLEMODE_SPACE; break;
    case HDLC_TXIDLE_MARK:             usc_idle_mode = IDLEMODE_MARK; break;
    }

    info->usc_idle_mode = usc_idle_mode;
    //usc_OutReg(info, TCSR, usc_idle_mode);
    info->tcsr_value &= ~IDLEMODE_MASK;    /* clear idle mode bits */
    info->tcsr_value += usc_idle_mode;
    usc_OutReg(info, TCSR, info->tcsr_value);

    /*
     * if SyncLink WAN adapter is running in external sync mode, the
     * transmitter has been set to Monosync in order to try to mimic
     * a true raw outbound bit stream. Monosync still sends an open/close
     * sync char at the start/end of a frame. Try to match those sync
     * patterns to the idle mode set here
     */
    if ( info->params.mode == MGSL_MODE_RAW ) {
        unsigned char syncpat = 0;
        switch( info->idle_mode ) {
        case HDLC_TXIDLE_FLAGS:
            syncpat = 0x7e;
            break;
        case HDLC_TXIDLE_ALT_ZEROS_ONES:
            syncpat = 0x55;
            break;
        case HDLC_TXIDLE_ZEROS:
        case HDLC_TXIDLE_SPACE:
            syncpat = 0x00;
            break;
        case HDLC_TXIDLE_ONES:
        case HDLC_TXIDLE_MARK:
            syncpat = 0xff;
            break;
        case HDLC_TXIDLE_ALT_MARK_SPACE:
            syncpat = 0xaa;
            break;
        }

        usc_SetTransmitSyncChars(info,syncpat,syncpat);
    }

}    /* end of usc_set_txidle() */

/* usc_get_serial_signals()
 *
 *    Query the adapter for the state of the V24 status (input) signals.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void usc_get_serial_signals( struct mgsl_struct *info )
{
    u16 status;

    /* clear all serial signals except RTS and DTR */
    info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;

    /* Read the Misc Interrupt status Register (MISR) to get */
    /* the V24 status signals. */

    status = usc_InReg( info, MISR );

    /* set serial signal bits to reflect MISR */

    if ( status & MISCSTATUS_CTS )
        info->serial_signals |= SerialSignal_CTS;

    if ( status & MISCSTATUS_DCD )
        info->serial_signals |= SerialSignal_DCD;

    if ( status & MISCSTATUS_RI )
        info->serial_signals |= SerialSignal_RI;

    if ( status & MISCSTATUS_DSR )
        info->serial_signals |= SerialSignal_DSR;

}    /* end of usc_get_serial_signals() */

/* usc_set_serial_signals()
 *
 *    Set the state of RTS and DTR based on contents of
 *    serial_signals member of device extension.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void usc_set_serial_signals( struct mgsl_struct *info )
{
    u16 Control;
    unsigned char V24Out = info->serial_signals;

    /* get the current value of the Port Control Register (PCR) */

    Control = usc_InReg( info, PCR );

    /* RTS/DTR outputs are active low: clear the bit to assert */
    if ( V24Out & SerialSignal_RTS )
        Control &= ~(BIT6);
    else
        Control |= BIT6;

    if ( V24Out & SerialSignal_DTR )
        Control &= ~(BIT4);
    else
        Control |= BIT4;

    usc_OutReg( info, PCR, Control );

}    /* end of usc_set_serial_signals() */

/* usc_enable_async_clock()
 *
 *    Enable the async clock at the specified frequency.
 *
 * Arguments:        info         pointer to device instance data
 *                   data_rate    data rate of clock in bps
 *                                0 disables the AUX clock.
 * Return Value:     None
 */
static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
{
    if ( data_rate ) {
        /*
         * Clock mode Control Register (CMCR)
         *
         * <15..14>   00   counter 1 Disabled
         * <13..12>   00   counter 0 Disabled
         * <11..10>   11   BRG1 Input is TxC Pin
         * <9..8>     11   BRG0 Input is TxC Pin
         * <7..6>     01   DPLL Input is BRG1 Output
         * <5..3>     100  TxCLK comes from BRG0
         * <2..0>     100  RxCLK comes from BRG0
         *
         * 0000 1111 0110 0100 = 0x0f64
         */

        usc_OutReg( info, CMCR, 0x0f64 );


        /*
         * Write 16-bit Time Constant for BRG0
         * Time Constant = (ClkSpeed / data_rate) - 1
         * ClkSpeed = 921600 (ISA), 691200 (PCI)
         */
        if ( info->bus_type == MGSL_BUS_TYPE_PCI )
            usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
        else
            usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );


        /*
         * Hardware Configuration Register (HCR)
         * Clear Bit 1, BRG0 mode = Continuous
         * Set Bit 0 to enable BRG0.
         */

        usc_OutReg( info, HCR,
                (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );


        /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */

        usc_OutReg( info, IOCR,
                (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
    } else {
        /* data rate == 0 so turn off BRG0 */
        usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
    }

}    /* end of usc_enable_async_clock() */

/*
 * Buffer Structures:
 *
 * Normal memory access uses virtual addresses that can make discontiguous
 * physical memory pages appear to be contiguous in the virtual address
 * space (the processors memory mapping handles the conversions).
 *
 * DMA transfers require physically contiguous memory. This is because
 * the DMA system controller and DMA bus masters deal with memory using
 * only physical addresses.
 *
 * This causes a problem under Windows NT when large DMA buffers are
 * needed. Fragmentation of the nonpaged pool prevents allocations of
 * physically contiguous buffers larger than the PAGE_SIZE.
 *
 * However the 16C32 supports Bus Master Scatter/Gather DMA which
 * allows DMA transfers to physically discontiguous buffers. Information
 * about each data transfer buffer is contained in a memory structure
 * called a 'buffer entry'. A list of buffer entries is maintained
 * to track and control the use of the data transfer buffers.
 *
 * To support this strategy we will allocate sufficient PAGE_SIZE
 * contiguous memory buffers to allow for the total required buffer
 * space.
 *
 * The 16C32 accesses the list of buffer entries using Bus Master
 * DMA. Control information is read from the buffer entries by the
 * 16C32 to control data transfers. status information is written to
 * the buffer entries by the 16C32 to indicate the status of completed
 * transfers.
 *
 * The CPU writes control information to the buffer entries to control
 * the 16C32 and reads status information from the buffer entries to
 * determine information about received and transmitted frames.
 *
 * Because the CPU and 16C32 (adapter) both need simultaneous access
 * to the buffer entries, the buffer entry memory is allocated with
 * HalAllocateCommonBuffer(). This restricts the size of the buffer
 * entry list to PAGE_SIZE.
 *
 * The actual data buffers on the other hand will only be accessed
 * by the CPU or the adapter but not by both simultaneously. This allows
 * Scatter/Gather packet based DMA procedures for using physically
 * discontiguous pages.
 */

/*
 * mgsl_reset_tx_dma_buffers()
 *
 *    Set the count for all transmit buffers to 0 to indicate the
 *    buffer is available for use and set the current buffer to the
 *    first buffer. This effectively makes all buffers free and
 *    discards any data in buffers.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
    unsigned int i;

    for ( i = 0; i < info->tx_buffer_count; i++ ) {
        /* NOTE(review): type-punned write — clears count (and presumably
         * the adjacent status field) in one store; the buffer entry is
         * shared with the DMA engine. Verify layout of DMABUFFERENTRY. */
        *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
    }

    info->current_tx_buffer = 0;
    info->start_tx_dma_buffer = 0;
    info->tx_dma_buffers_used = 0;

    info->get_tx_holding_index = 0;
    info->put_tx_holding_index = 0;
    info->tx_holding_count = 0;

}    /* end of mgsl_reset_tx_dma_buffers() */

/*
 * num_free_tx_dma_buffers()
 *
 *    returns the number of free tx dma buffers available
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     number of free tx dma buffers
 */
static int num_free_tx_dma_buffers(struct mgsl_struct *info)
{
    return info->tx_buffer_count - info->tx_dma_buffers_used;
}

/*
 * mgsl_reset_rx_dma_buffers()
 *
 *    Set the count for all receive buffers to DMABUFFERSIZE
 *    and set the current buffer to the first buffer. This effectively
 *    makes all buffers free and discards any data in buffers.
 *
 * Arguments:        info    pointer to device instance data
 * Return Value:     None
 */
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
    unsigned int i;

    for ( i = 0; i < info->rx_buffer_count; i++ ) {
        /* single type-punned store covering count + status fields */
        *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
//        info->rx_buffer_list[i].count = DMABUFFERSIZE;
//        info->rx_buffer_list[i].status = 0;
    }

    info->current_rx_buffer = 0;

}    /* end of mgsl_reset_rx_dma_buffers() */

/*
 * mgsl_free_rx_frame_buffers()
 *
 *    Free the receive buffers used by a received SDLC
 *    frame such that the buffers can be reused.
 *
 * Arguments:
 *
 *    info           pointer to device instance data
 *    StartIndex     index of 1st receive buffer of frame
 *    EndIndex       index of last receive buffer of frame
 *
 * Return Value:     None
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info,
    unsigned int StartIndex, unsigned int EndIndex )
{
    bool Done = false;
    DMABUFFERENTRY *pBufEntry;
    unsigned int Index;

    /* Starting with 1st buffer entry of the frame clear the status */
    /* field and set the count field to DMA Buffer Size. */

    Index = StartIndex;

    while( !Done ) {
        pBufEntry = &(info->rx_buffer_list[Index]);

        if ( Index == EndIndex ) {
            /* This is the last buffer of the frame! */
            Done = true;
        }

        /* reset current buffer for reuse */
//        pBufEntry->status = 0;
//        pBufEntry->count = DMABUFFERSIZE;
        *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;

        /* advance to next buffer entry in linked list */
        Index++;
        if ( Index == info->rx_buffer_count )
            Index = 0;
    }

    /* set current buffer to next buffer after last buffer of frame */
    info->current_rx_buffer = Index;

}    /* end of free_rx_frame_buffers() */

/* mgsl_get_rx_frame()
 *
 *    This function attempts to return a received SDLC frame from the
 *    receive DMA buffers. Only frames received without errors are returned.
 *
 * Arguments:        info    pointer to device extension
 * Return Value:     true if frame returned, otherwise false
 */
static bool mgsl_get_rx_frame(struct mgsl_struct *info)
{
    unsigned int StartIndex, EndIndex;    /* index of 1st and last buffers of Rx frame */
    unsigned short status;
    DMABUFFERENTRY *pBufEntry;
    unsigned int framesize = 0;
    bool ReturnCode = false;
    unsigned long flags;
    struct tty_struct *tty = info->port.tty;
    bool return_frame = false;

    /*
     * current_rx_buffer points to the 1st buffer of the next available
     * receive frame. To find the last buffer of the frame look for
     * a non-zero status field in the buffer entries. (The status
     * field is set by the 16C32 after completing a receive frame.
     */

    StartIndex = EndIndex = info->current_rx_buffer;

    while( !info->rx_buffer_list[EndIndex].status ) {
        /*
         * If the count field of the buffer entry is non-zero then
         * this buffer has not been used. (The 16C32 clears the count
         * field when it starts using the buffer.) If an unused buffer
         * is encountered then there are no frames available.
         */

        if ( info->rx_buffer_list[EndIndex].count )
            goto Cleanup;

        /* advance to next buffer entry in linked list */
        EndIndex++;
        if ( EndIndex == info->rx_buffer_count )
            EndIndex = 0;

        /* if entire list searched then no frame available */
        if ( EndIndex == StartIndex ) {
            /* If this occurs then something bad happened,
             * all buffers have been 'used' but none mark
             * the end of a frame. Reset buffers and receiver.
             */

            if ( info->rx_enabled ){
                spin_lock_irqsave(&info->irq_spinlock,flags);
                usc_start_receiver(info);
                spin_unlock_irqrestore(&info->irq_spinlock,flags);
            }
            goto Cleanup;
        }
    }


    /* check status of receive frame */

    status = info->rx_buffer_list[EndIndex].status;

    if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
            RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
        if ( status & RXSTATUS_SHORT_FRAME )
            info->icount.rxshort++;
        else if ( status & RXSTATUS_ABORT )
            info->icount.rxabort++;
        else if ( status & RXSTATUS_OVERRUN )
            info->icount.rxover++;
        else {
            info->icount.rxcrc++;
            /* caller asked for CRC-errored frames to be delivered too */
            if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
                return_frame = true;
        }
        framesize = 0;
#if SYNCLINK_GENERIC_HDLC
        /* NOTE(review): bare block — upstream variants gate these stats
         * on info->netcount; confirm intended here. */
        {
            info->netdev->stats.rx_errors++;
            info->netdev->stats.rx_frame_errors++;
        }
#endif
    } else
        return_frame = true;

    if ( return_frame ) {
        /* receive frame has no errors, get frame size.
         * The frame size is the starting value of the RCC (which was
         * set to 0xffff) minus the ending value of the RCC (decremented
         * once for each receive character) minus 2 for the 16-bit CRC.
         */

        framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;

        /* adjust frame size for CRC if any */
        if ( info->params.crc_type == HDLC_CRC_16_CCITT )
            framesize -= 2;
        else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
            framesize -= 4;
    }

    if ( debug_level >= DEBUG_LEVEL_BH )
        printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
            __FILE__,__LINE__,info->device_name,status,framesize);

    if ( debug_level >= DEBUG_LEVEL_DATA )
        mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
            min_t(int, framesize, DMABUFFERSIZE),0);

    if (framesize) {
        if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
                ((framesize+1) > info->max_frame_size) ) ||
            (framesize > info->max_frame_size) )
            info->icount.rxlong++;
        else {
            /* copy dma buffer(s) to contiguous intermediate buffer */
            int copy_count = framesize;
            int index = StartIndex;
            unsigned char *ptmp = info->intermediate_rxbuffer;
            if ( !(status & RXSTATUS_CRC_ERROR))
                info->icount.rxok++;

            while(copy_count) {
                int partial_count;
                if ( copy_count > DMABUFFERSIZE )
                    partial_count = DMABUFFERSIZE;
                else
                    partial_count = copy_count;

                pBufEntry = &(info->rx_buffer_list[index]);
                memcpy( ptmp, pBufEntry->virt_addr, partial_count );
                ptmp += partial_count;
                copy_count -= partial_count;

                if ( ++index == info->rx_buffer_count )
                    index = 0;
            }

            if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
                /* append 1 status byte (RX_OK / RX_CRC_ERROR) to frame */
                ++framesize;
                *ptmp = (status & RXSTATUS_CRC_ERROR ?
                        RX_CRC_ERROR :
                        RX_OK);

                if ( debug_level >= DEBUG_LEVEL_DATA )
                    printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
                        __FILE__,__LINE__,info->device_name,
                        *ptmp);
            }

#if SYNCLINK_GENERIC_HDLC
            if (info->netcount)
                hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
            else
#endif
                ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
        }
    }
    /* Free the buffers used by this frame.
     */
    mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );

    ReturnCode = true;

Cleanup:

    if ( info->rx_enabled && info->rx_overflow ) {
        /* The receiver needs to be restarted because of
         * a receive overflow (buffer or FIFO). If the
         * receive buffers are now empty, then restart receiver.
         */

        if ( !info->rx_buffer_list[EndIndex].status &&
            info->rx_buffer_list[EndIndex].count ) {
            spin_lock_irqsave(&info->irq_spinlock,flags);
            usc_start_receiver(info);
            spin_unlock_irqrestore(&info->irq_spinlock,flags);
        }
    }

    return ReturnCode;

}    /* end of mgsl_get_rx_frame() */

/* mgsl_get_raw_rx_frame()
 *
 *    This function attempts to return a received frame from the
 *    receive DMA buffers when running in external loop mode. In this mode,
 *    we will return at most one DMABUFFERSIZE frame to the application.
 *    The USC receiver is triggering off of DCD going active to start a new
 *    frame, and DCD going inactive to terminate the frame (similar to
 *    processing a closing flag character).
 *
 *    In this routine, we will return DMABUFFERSIZE "chunks" at a time.
 *    If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
 *    status field and the RCC field will indicate the length of the
 *    entire received frame. We take this RCC field and get the modulus
 *    of RCC and DMABUFFERSIZE to determine if number of bytes in the
 *    last Rx DMA buffer and return that last portion of the frame.
 *
 * Arguments:        info    pointer to device extension
 * Return Value:     true if frame returned, otherwise false
 */
static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
    unsigned int CurrentIndex, NextIndex;
    unsigned short status;
    DMABUFFERENTRY *pBufEntry;
    unsigned int framesize = 0;
    bool ReturnCode = false;
    unsigned long flags;
    struct tty_struct *tty = info->port.tty;

    /*
     * current_rx_buffer points to the 1st buffer of the next available
     * receive frame. The status field is set by the 16C32 after
     * completing a receive frame.
If the status field of this buffer
     * is zero, either the USC is still filling this buffer or this
     * is one of a series of buffers making up a received frame.
     *
     * If the count field of this buffer is zero, the USC is either
     * using this buffer or has used this buffer. Look at the count
     * field of the next buffer. If that next buffer's count is
     * non-zero, the USC is still actively using the current buffer.
     * Otherwise, if the next buffer's count field is zero, the
     * current buffer is complete and the USC is using the next
     * buffer.
     */

    CurrentIndex = NextIndex = info->current_rx_buffer;
    ++NextIndex;
    if ( NextIndex == info->rx_buffer_count )
        NextIndex = 0;

    if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
        (info->rx_buffer_list[CurrentIndex].count == 0 &&
            info->rx_buffer_list[NextIndex].count == 0)) {
        /*
         * Either the status field of this dma buffer is non-zero
         * (indicating the last buffer of a receive frame) or the next
         * buffer is marked as in use -- implying this buffer is complete
         * and an intermediate buffer for this received frame.
         */

        status = info->rx_buffer_list[CurrentIndex].status;

        if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
                RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
            if ( status & RXSTATUS_SHORT_FRAME )
                info->icount.rxshort++;
            else if ( status & RXSTATUS_ABORT )
                info->icount.rxabort++;
            else if ( status & RXSTATUS_OVERRUN )
                info->icount.rxover++;
            else
                info->icount.rxcrc++;
            framesize = 0;
        } else {
            /*
             * A receive frame is available, get frame size and status.
             *
             * The frame size is the starting value of the RCC (which was
             * set to 0xffff) minus the ending value of the RCC (decremented
             * once for each receive character) minus 2 or 4 for the 16-bit
             * or 32-bit CRC.
             *
             * If the status field is zero, this is an intermediate buffer.
             * Its size is 4K.
             *
             * If the DMA Buffer Entry's Status field is non-zero, the
             * receive operation completed normally (ie: DCD dropped). The
             * RCC field is valid and holds the received frame size.
             * It is possible that the RCC field will be zero on a DMA buffer
             * entry with a non-zero status. This can occur if the total
             * frame size (number of bytes between the time DCD goes active
             * to the time DCD goes inactive) exceeds 65535 bytes. In this
             * case the 16C32 has underrun on the RCC count and appears to
             * stop updating this counter to let us know the actual received
             * frame size. If this happens (non-zero status and zero RCC),
             * simply return the entire RxDMA Buffer
             */
            if ( status ) {
                /*
                 * In the event that the final RxDMA Buffer is
                 * terminated with a non-zero status and the RCC
                 * field is zero, we interpret this as the RCC
                 * having underflowed (received frame > 65535 bytes).
                 *
                 * Signal the event to the user by passing back
                 * a status of RxStatus_CrcError returning the full
                 * buffer and let the app figure out what data is
                 * actually valid
                 */
                if ( info->rx_buffer_list[CurrentIndex].rcc )
                    framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
                else
                    framesize = DMABUFFERSIZE;
            }
            else
                framesize = DMABUFFERSIZE;
        }

        if ( framesize > DMABUFFERSIZE ) {
            /*
             * if running in raw sync mode, ISR handler for
             * End Of Buffer events terminates all buffers at 4K.
             * If this frame size is said to be >4K, get the
             * actual number of bytes of the frame in this buffer.
             */
            framesize = framesize % DMABUFFERSIZE;
        }


        if ( debug_level >= DEBUG_LEVEL_BH )
            printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
                __FILE__,__LINE__,info->device_name,status,framesize);

        if ( debug_level >= DEBUG_LEVEL_DATA )
            mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
                min_t(int, framesize, DMABUFFERSIZE),0);

        if (framesize) {
            /* copy dma buffer(s) to contiguous intermediate buffer */
            /* NOTE: we never copy more than DMABUFFERSIZE bytes    */

            pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
            memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
            info->icount.rxok++;

            ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
        }

        /* Free the buffers used by this frame. */
        mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );

        ReturnCode = true;
    }


    if ( info->rx_enabled && info->rx_overflow ) {
        /* The receiver needs to be restarted because of
         * a receive overflow (buffer or FIFO). If the
         * receive buffers are now empty, then restart receiver.
         */

        if ( !info->rx_buffer_list[CurrentIndex].status &&
            info->rx_buffer_list[CurrentIndex].count ) {
            spin_lock_irqsave(&info->irq_spinlock,flags);
            usc_start_receiver(info);
            spin_unlock_irqrestore(&info->irq_spinlock,flags);
        }
    }

    return ReturnCode;

}    /* end of mgsl_get_raw_rx_frame() */

/* mgsl_load_tx_dma_buffer()
 *
 *    Load the transmit DMA buffer with the specified data.
 *
 * Arguments:
 *
 * 	info		pointer to device extension
 * 	Buffer		pointer to buffer containing frame to load
 * 	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value: 	None
 */
static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
		const char *Buffer, unsigned int BufferSize)
{
	unsigned short Copycount;
	unsigned int i = 0;
	DMABUFFERENTRY *pBufEntry;

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);

	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		/* set CMR:13 to start transmit when
		 * next GoAhead (abort) is received
		 */
		info->cmr_value |= BIT13;
	}

	/* begin loading the frame in the next available tx dma
	 * buffer, remember it's starting location for setting
	 * up tx dma operation
	 */
	i = info->current_tx_buffer;
	info->start_tx_dma_buffer = i;

	/* Setup the status and RCC (Frame Size) fields of the 1st */
	/* buffer entry in the transmit DMA buffer list. */
	/* (status keeps only the high nibble of the shadowed CMR value) */

	info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
	info->tx_buffer_list[i].rcc    = BufferSize;
	info->tx_buffer_list[i].count  = BufferSize;

	/* Copy frame data from 1st source buffer to the DMA buffers. */
	/* The frame data may span multiple DMA buffers. */

	while( BufferSize ){
		/* Get a pointer to next DMA buffer entry. */
		pBufEntry = &info->tx_buffer_list[i++];

		/* wrap to the first DMA buffer when the list is exhausted */
		if ( i == info->tx_buffer_count )
			i=0;

		/* Calculate the number of bytes that can be copied from */
		/* the source buffer to this DMA buffer. */
		if ( BufferSize > DMABUFFERSIZE )
			Copycount = DMABUFFERSIZE;
		else
			Copycount = BufferSize;

		/* Actually copy data from source buffer to DMA buffer. */
		/* Also set the data count for this individual DMA buffer. */
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
		else
			memcpy(pBufEntry->virt_addr, Buffer, Copycount);

		pBufEntry->count = Copycount;

		/* Advance source pointer and reduce remaining data count.
		 */
		Buffer += Copycount;
		BufferSize -= Copycount;

		++info->tx_dma_buffers_used;
	}

	/* remember next available tx dma buffer */
	info->current_tx_buffer = i;

}	/* end of mgsl_load_tx_dma_buffer() */

/*
 * mgsl_register_test()
 *
 * 	Performs a register test of the 16C32.
 *	NOTE(review): destructive -- resets the chip on entry and exit, so
 *	only call while the port is not carrying traffic.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:		true if test passed, otherwise false
 */
static bool mgsl_register_test( struct mgsl_struct *info )
{
	static unsigned short BitPatterns[] =
		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned int i;
	bool rc = true;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/* Verify the reset state of some registers. */

	if ( (usc_InReg( info, SICR ) != 0) ||
		  (usc_InReg( info, IVR  ) != 0) ||
		  (usc_InDmaReg( info, DIVR ) != 0) ){
		rc = false;
	}

	if ( rc ){
		/* Write bit patterns to various registers but do it out of */
		/* sync, then read back and verify values. */

		for ( i = 0 ; i < Patterncount ; i++ ) {
			usc_OutReg( info, TC0R, BitPatterns[i] );
			usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
			usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
			usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
			usc_OutReg( info, RSR,  BitPatterns[(i+4)%Patterncount] );
			usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );

			if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
				  (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
				  (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
				  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
				  (usc_InReg( info, RSR )  != BitPatterns[(i+4)%Patterncount]) ||
				  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
				rc = false;
				break;
			}
		}
	}

	usc_reset(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;

}	/* end of mgsl_register_test() */

/* mgsl_irq_test() 	Perform interrupt test of the 16C32.
* * Arguments: info pointer to device instance data * Return Value: true if test passed, otherwise false */ static bool mgsl_irq_test( struct mgsl_struct *info ) { unsigned long EndTime; unsigned long flags; spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); /* * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. * The ISR sets irq_occurred to true. */ info->irq_occurred = false; /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */ /* Enable INTEN (Port 6, Bit12) */ /* This connects the IRQ request signal to the ISA bus */ /* on the ISA adapter. This has no effect for the PCI adapter */ usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) ); usc_EnableMasterIrqBit(info); usc_EnableInterrupts(info, IO_PIN); usc_ClearIrqPendingBits(info, IO_PIN); usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED); usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE); spin_unlock_irqrestore(&info->irq_spinlock,flags); EndTime=100; while( EndTime-- && !info->irq_occurred ) { msleep_interruptible(10); } spin_lock_irqsave(&info->irq_spinlock,flags); usc_reset(info); spin_unlock_irqrestore(&info->irq_spinlock,flags); return info->irq_occurred; } /* end of mgsl_irq_test() */ /* mgsl_dma_test() * * Perform a DMA test of the 16C32. A small frame is * transmitted via DMA from a transmit buffer to a receive buffer * using single buffer DMA mode. 
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	true if test passed, otherwise false
 */
static bool mgsl_dma_test( struct mgsl_struct *info )
{
	unsigned short FifoLevel;
	unsigned long phys_addr;
	unsigned int FrameSize;
	unsigned int i;
	char *TmpPtr;
	bool rc = true;
	unsigned short status=0;
	unsigned long EndTime;
	unsigned long flags;
	MGSL_PARAMS tmp_params;

	/* save current port options */
	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
	/* load default port options */
	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));

#define TESTFRAMESIZE 40

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup 16C32 for SDLC DMA transfer mode */

	usc_reset(info);
	usc_set_sdlc_mode(info);
	usc_enable_loopback(info,1);

	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
	 * field of the buffer entry after fetching buffer address. This
	 * way we can detect a DMA failure for a DMA read (which should be
	 * non-destructive to system memory) before we try and write to
	 * memory (where a failure could corrupt system memory).
	 */

	/* Receive DMA mode Register (RDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	RSBinA/L = store Rx status Block in List entry
	 * <12>		0	1 = Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on RxBound
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (write as 0s)
	 *
	 * 1110 0010 0000 0000 = 0xe200
	 */

	usc_OutDmaReg( info, RDMR, 0xe200 );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */

	FrameSize = TESTFRAMESIZE;

	/* setup 1st transmit buffer entry: */
	/* with frame size and transmit control word */

	info->tx_buffer_list[0].count  = FrameSize;
	info->tx_buffer_list[0].rcc    = FrameSize;
	info->tx_buffer_list[0].status = 0x4000;

	/* build a transmit frame in 1st transmit DMA buffer */

	TmpPtr = info->tx_buffer_list[0].virt_addr;
	for (i = 0; i < FrameSize; i++ )
		*TmpPtr++ = i;

	/* setup 1st receive buffer entry: */
	/* clear status, set max receive buffer size */

	info->rx_buffer_list[0].status = 0;
	info->rx_buffer_list[0].count = FrameSize + 4;

	/* zero out the 1st receive buffer */

	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );

	/* Set count field of next buffer entries to prevent */
	/* 16C32 from using buffers after the 1st one. */

	info->tx_buffer_list[1].count = 0;
	info->rx_buffer_list[1].count = 0;


	/***************************/
	/* Program 16C32 receiver. */
	/***************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup DMA transfers */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
	phys_addr = info->rx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );

	/* Clear the Rx DMA status bits (read RDMR) and start channel */
	usc_InDmaReg( info, RDMR );
	usc_DmaCmd( info, DmaCmd_InitRxChannel );

	/* Enable Receiver (RMR <1..0> = 10) */
	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/*************************************************************/
	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
	/*************************************************************/

	/* Wait 100ms for interrupt. */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InDmaReg( info, RDMR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( !(status & BIT4) && (status & BIT5) ) {
			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
			/* BUSY  (BIT 5) is active (channel still active). */
			/* This means the buffer entry read has completed. */
			break;
		}
	}


	/******************************/
	/* Program 16C32 transmitter. */
	/******************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */

	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* Program the address of the 1st DMA Buffer Entry in linked list */

	phys_addr = info->tx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );

	/* unlatch Tx status bits, and start transmit channel. */

	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
	usc_DmaCmd( info, DmaCmd_InitTxChannel );

	/* wait for DMA controller to fill transmit FIFO */

	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/**********************************/
	/* WAIT FOR TRANSMIT FIFO TO FILL */
	/**********************************/

	/* Wait 100ms */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = false;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		FifoLevel = usc_InReg(info, TICR) >> 8;
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( FifoLevel < 16 )
			break;
		else
			if ( FrameSize < 32 ) {
				/* This frame is smaller than the entire transmit FIFO */
				/* so wait for the entire frame to be loaded. */
				if ( FifoLevel <= (32 - FrameSize) )
					break;
			}
	}


	if ( rc )
	{
		/* Enable 16C32 transmitter. */

		spin_lock_irqsave(&info->irq_spinlock,flags);

		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
		usc_TCmd( info, TCmd_SendFrame );
		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );

		spin_unlock_irqrestore(&info->irq_spinlock,flags);


		/******************************/
		/* WAIT FOR TRANSMIT COMPLETE */
		/******************************/

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* While timer not expired wait for transmit complete */

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InReg( info, TCSR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}

			spin_lock_irqsave(&info->irq_spinlock,flags);
			status = usc_InReg( info, TCSR );
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}


	if ( rc ){
		/* CHECK FOR TRANSMIT ERRORS */
		if ( status & (BIT5 + BIT1) )
			rc = false;
	}

	if ( rc ) {
		/* WAIT FOR RECEIVE COMPLETE */

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* Wait for 16C32 to write receive status to buffer entry. */
		/* NOTE(review): busy-wait poll on memory written by the DMA
		 * controller; no barrier/readback is used here -- relies on
		 * the buffer being in coherent/uncached memory. */
		status=info->rx_buffer_list[0].status;
		while ( status == 0 ) {
			if (time_after(jiffies, EndTime)) {
				rc = false;
				break;
			}
			status=info->rx_buffer_list[0].status;
		}
	}


	if ( rc ) {
		/* CHECK FOR RECEIVE ERRORS */
		status = info->rx_buffer_list[0].status;

		if ( status & (BIT8 + BIT3 + BIT1) ) {
			/* receive error has occurred */
			rc = false;
		} else {
			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
				rc = false;
			}
		}
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* restore current port options */
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));

	return rc;

}	/* end of mgsl_dma_test() */

/* mgsl_adapter_test()
 *
 * 	Perform the register, IRQ, and DMA tests for the 16C32.
* * Arguments: info pointer to device instance data * Return Value: 0 if success, otherwise -ENODEV */ static int mgsl_adapter_test( struct mgsl_struct *info ) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):Testing device %s\n", __FILE__,__LINE__,info->device_name ); if ( !mgsl_register_test( info ) ) { info->init_error = DiagStatus_AddressFailure; printk( "%s(%d):Register test failure for device %s Addr=%04X\n", __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) ); return -ENODEV; } if ( !mgsl_irq_test( info ) ) { info->init_error = DiagStatus_IrqFailure; printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); return -ENODEV; } if ( !mgsl_dma_test( info ) ) { info->init_error = DiagStatus_DmaFailure; printk( "%s(%d):DMA test failure for device %s DMA=%d\n", __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) ); return -ENODEV; } if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):device %s passed diagnostics\n", __FILE__,__LINE__,info->device_name ); return 0; } /* end of mgsl_adapter_test() */ /* mgsl_memory_test() * * Test the shared memory on a PCI adapter. * * Arguments: info pointer to device instance data * Return Value: true if test passed, otherwise false */ static bool mgsl_memory_test( struct mgsl_struct *info ) { static unsigned long BitPatterns[] = { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 }; unsigned long Patterncount = ARRAY_SIZE(BitPatterns); unsigned long i; unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long); unsigned long * TestAddr; if ( info->bus_type != MGSL_BUS_TYPE_PCI ) return true; TestAddr = (unsigned long *)info->memory_base; /* Test data lines with test pattern at one location. 
*/ for ( i = 0 ; i < Patterncount ; i++ ) { *TestAddr = BitPatterns[i]; if ( *TestAddr != BitPatterns[i] ) return false; } /* Test address lines with incrementing pattern over */ /* entire address range. */ for ( i = 0 ; i < TestLimit ; i++ ) { *TestAddr = i * 4; TestAddr++; } TestAddr = (unsigned long *)info->memory_base; for ( i = 0 ; i < TestLimit ; i++ ) { if ( *TestAddr != i * 4 ) return false; TestAddr++; } memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE ); return true; } /* End Of mgsl_memory_test() */ /* mgsl_load_pci_memory() * * Load a large block of data into the PCI shared memory. * Use this instead of memcpy() or memmove() to move data * into the PCI shared memory. * * Notes: * * This function prevents the PCI9050 interface chip from hogging * the adapter local bus, which can starve the 16C32 by preventing * 16C32 bus master cycles. * * The PCI9050 documentation says that the 9050 will always release * control of the local bus after completing the current read * or write operation. * * It appears that as long as the PCI9050 write FIFO is full, the * PCI9050 treats all of the writes as a single burst transaction * and will not release the bus. This causes DMA latency problems * at high speeds when copying large data blocks to the shared * memory. * * This function in effect, breaks the a large shared memory write * into multiple transations by interleaving a shared memory read * which will flush the write FIFO and 'complete' the write * transation. This allows any pending DMA request to gain control * of the local bus in a timely fasion. 
* * Arguments: * * TargetPtr pointer to target address in PCI shared memory * SourcePtr pointer to source buffer for data * count count in bytes of data to copy * * Return Value: None */ static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr, unsigned short count ) { /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */ #define PCI_LOAD_INTERVAL 64 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL; unsigned short Index; unsigned long Dummy; for ( Index = 0 ; Index < Intervalcount ; Index++ ) { memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL); Dummy = *((volatile unsigned long *)TargetPtr); TargetPtr += PCI_LOAD_INTERVAL; SourcePtr += PCI_LOAD_INTERVAL; } memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL ); } /* End Of mgsl_load_pci_memory() */ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit) { int i; int linecount; if (xmit) printk("%s tx data:\n",info->device_name); else printk("%s rx data:\n",info->device_name); while(count) { if (count > 16) linecount = 16; else linecount = count; for(i=0;i<linecount;i++) printk("%02X ",(unsigned char)data[i]); for(;i<17;i++) printk(" "); for(i=0;i<linecount;i++) { if (data[i]>=040 && data[i]<=0176) printk("%c",data[i]); else printk("."); } printk("\n"); data += linecount; count -= linecount; } } /* end of mgsl_trace_block() */ /* mgsl_tx_timeout() * * called when HDLC frame times out * update stats and do tx completion processing * * Arguments: context pointer to device instance data * Return Value: None */ static void mgsl_tx_timeout(unsigned long context) { struct mgsl_struct *info = (struct mgsl_struct*)context; unsigned long flags; if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):mgsl_tx_timeout(%s)\n", __FILE__,__LINE__,info->device_name); if(info->tx_active && (info->params.mode == MGSL_MODE_HDLC || info->params.mode == MGSL_MODE_RAW) ) { info->icount.txtimeout++; } spin_lock_irqsave(&info->irq_spinlock,flags); info->tx_active = 
	false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
		usc_loopmode_cancel_transmit( info );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
		mgsl_bh_transmit(info);

}	/* end of mgsl_tx_timeout() */

/* signal that there are no more frames to send, so that
 * line is 'released' by echoing RxD to TxD when current
 * transmission is complete (or immediately if no tx in progress).
 */
static int mgsl_loopmode_send_done( struct mgsl_struct * info )
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		/* defer release until the in-flight frame finishes */
		if (info->tx_active)
			info->loopmode_send_done_requested = true;
		else
			usc_loopmode_send_done(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/* release the line by echoing RxD to TxD
 * upon completion of a transmit frame
 */
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
	info->loopmode_send_done_requested = false;
	/* clear CMR:13 to 0 to start echoing RxData to TxData */
	info->cmr_value &= ~BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* abort a transmit in progress while in HDLC LoopMode
 */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
	/* reset tx dma channel and purge TxFifo */
	usc_RTCmd( info, RTCmd_PurgeTxFifo );
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_loopmode_send_done( info );
}

/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
 * we must clear CMR:13 to begin repeating TxData to RxData
 */
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
	info->loopmode_insert_requested = true;

	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
	 * begin repeating TxData on RxData (complete insertion)
	 */
	usc_OutReg( info, RICR,
		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );

	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
	info->cmr_value |= BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* return 1 if station is inserted into the loop, otherwise 0
 * (CCSR bit 7 reflects the loop-insertion state)
 */
static int usc_loopmode_active( struct mgsl_struct * info)
{
	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
}

#if SYNCLINK_GENERIC_HDLC

/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned char  new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	/* map generic-HDLC encoding constant to driver constant */
	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	/* map generic-HDLC FCS constant to driver CRC type */
	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount)
		mgsl_program_hw(info);

	return 0;
}

/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to
 network device structure */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->xmit_cnt = skb->len;
	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	dev->trans_start = jiffies;

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_active)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return NETDEV_TX_OK;
}

/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	if ((rc = hdlc_open(dev)))
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		/* startup failed: roll back the netcount claim */
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert RTS and DTR, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	mgsl_program_hw(info);

	/* enable network layer transmit */
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}

/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* narrow to just the clock-source flag bits */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback   = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		/* replace the clock-source flag bits, keep everything else */
		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			mgsl_program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n", dev->name);

	if (skb == NULL) {
		/* allocation failed: count the drop, frame is lost */
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, size), buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}

static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout =
hdlcdev_tx_timeout, }; /** * called by device driver when adding device instance * do generic HDLC initialization * * info pointer to device instance information * * returns 0 if success, otherwise error code */ static int hdlcdev_init(struct mgsl_struct *info) { int rc; struct net_device *dev; hdlc_device *hdlc; /* allocate and initialize network and HDLC layer objects */ if (!(dev = alloc_hdlcdev(info))) { printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__); return -ENOMEM; } /* for network layer reporting purposes only */ dev->base_addr = info->io_base; dev->irq = info->irq_level; dev->dma = info->dma_level; /* network layer callbacks and settings */ dev->netdev_ops = &hdlcdev_ops; dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings */ hdlc = dev_to_hdlc(dev); hdlc->attach = hdlcdev_attach; hdlc->xmit = hdlcdev_xmit; /* register objects with HDLC layer */ if ((rc = register_hdlc_device(dev))) { printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); free_netdev(dev); return rc; } info->netdev = dev; return 0; } /** * called by device driver when removing device instance * do generic HDLC cleanup * * info pointer to device instance information */ static void hdlcdev_exit(struct mgsl_struct *info) { unregister_hdlc_device(info->netdev); free_netdev(info->netdev); info->netdev = NULL; } #endif /* CONFIG_HDLC */ static int synclink_init_one (struct pci_dev *dev, const struct pci_device_id *ent) { struct mgsl_struct *info; if (pci_enable_device(dev)) { printk("error enabling pci device %p\n", dev); return -EIO; } if (!(info = mgsl_allocate_device())) { printk("can't allocate device instance data.\n"); return -EIO; } /* Copy user configuration info to device instance data */ info->io_base = pci_resource_start(dev, 2); info->irq_level = dev->irq; info->phys_memory_base = pci_resource_start(dev, 3); /* Because veremap only works on page boundaries we must map * a larger area than is actually 
implemented for the LCR * memory range. We map a full page starting at the page boundary. */ info->phys_lcr_base = pci_resource_start(dev, 0); info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1); info->phys_lcr_base &= ~(PAGE_SIZE-1); info->bus_type = MGSL_BUS_TYPE_PCI; info->io_addr_size = 8; info->irq_flags = IRQF_SHARED; if (dev->device == 0x0210) { /* Version 1 PCI9030 based universal PCI adapter */ info->misc_ctrl_value = 0x007c4080; info->hw_version = 1; } else { /* Version 0 PCI9050 based 5V PCI adapter * A PCI9050 bug prevents reading LCR registers if * LCR base address bit 7 is set. Maintain shadow * value so we can write to LCR misc control reg. */ info->misc_ctrl_value = 0x087e4546; info->hw_version = 0; } mgsl_add_device(info); return 0; } static void synclink_remove_one (struct pci_dev *dev) { }
gpl-2.0
AndroidGX/SimpleGX-L-5.0.2_G901F
arch/arm/mach-omap1/gpio16xx.c
2362
6607
/* * OMAP16xx specific gpio init * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ * * Author: * Charulatha V <charu@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/gpio.h> #include <linux/platform_data/gpio-omap.h> #include <mach/irqs.h> #define OMAP1610_GPIO1_BASE 0xfffbe400 #define OMAP1610_GPIO2_BASE 0xfffbec00 #define OMAP1610_GPIO3_BASE 0xfffbb400 #define OMAP1610_GPIO4_BASE 0xfffbbc00 #define OMAP1_MPUIO_VBASE OMAP1_MPUIO_BASE /* smart idle, enable wakeup */ #define SYSCONFIG_WORD 0x14 /* mpu gpio */ static struct __initdata resource omap16xx_mpu_gpio_resources[] = { { .start = OMAP1_MPUIO_VBASE, .end = OMAP1_MPUIO_VBASE + SZ_2K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_MPUIO, .flags = IORESOURCE_IRQ, }, }; static struct omap_gpio_reg_offs omap16xx_mpuio_regs = { .revision = USHRT_MAX, .direction = OMAP_MPUIO_IO_CNTL, .datain = OMAP_MPUIO_INPUT_LATCH, .dataout = OMAP_MPUIO_OUTPUT, .irqstatus = OMAP_MPUIO_GPIO_INT, .irqenable = OMAP_MPUIO_GPIO_MASKIT, .irqenable_inv = true, .irqctrl = OMAP_MPUIO_GPIO_INT_EDGE, }; static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = { .is_mpuio = true, .bank_width = 16, .bank_stride = 1, .regs = &omap16xx_mpuio_regs, }; static struct platform_device omap16xx_mpu_gpio = { .name = "omap_gpio", .id = 0, .dev = { .platform_data = &omap16xx_mpu_gpio_config, }, .num_resources = ARRAY_SIZE(omap16xx_mpu_gpio_resources), .resource = omap16xx_mpu_gpio_resources, }; /* gpio1 */ static struct __initdata resource omap16xx_gpio1_resources[] = { { .start = OMAP1610_GPIO1_BASE, 
.end = OMAP1610_GPIO1_BASE + SZ_2K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_GPIO_BANK1, .flags = IORESOURCE_IRQ, }, }; static struct omap_gpio_reg_offs omap16xx_gpio_regs = { .revision = OMAP1610_GPIO_REVISION, .direction = OMAP1610_GPIO_DIRECTION, .set_dataout = OMAP1610_GPIO_SET_DATAOUT, .clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT, .datain = OMAP1610_GPIO_DATAIN, .dataout = OMAP1610_GPIO_DATAOUT, .irqstatus = OMAP1610_GPIO_IRQSTATUS1, .irqenable = OMAP1610_GPIO_IRQENABLE1, .set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1, .clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1, .wkup_en = OMAP1610_GPIO_WAKEUPENABLE, .edgectrl1 = OMAP1610_GPIO_EDGE_CTRL1, .edgectrl2 = OMAP1610_GPIO_EDGE_CTRL2, }; static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = { .bank_width = 16, .regs = &omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio1 = { .name = "omap_gpio", .id = 1, .dev = { .platform_data = &omap16xx_gpio1_config, }, .num_resources = ARRAY_SIZE(omap16xx_gpio1_resources), .resource = omap16xx_gpio1_resources, }; /* gpio2 */ static struct __initdata resource omap16xx_gpio2_resources[] = { { .start = OMAP1610_GPIO2_BASE, .end = OMAP1610_GPIO2_BASE + SZ_2K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_1610_GPIO_BANK2, .flags = IORESOURCE_IRQ, }, }; static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = { .bank_width = 16, .regs = &omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio2 = { .name = "omap_gpio", .id = 2, .dev = { .platform_data = &omap16xx_gpio2_config, }, .num_resources = ARRAY_SIZE(omap16xx_gpio2_resources), .resource = omap16xx_gpio2_resources, }; /* gpio3 */ static struct __initdata resource omap16xx_gpio3_resources[] = { { .start = OMAP1610_GPIO3_BASE, .end = OMAP1610_GPIO3_BASE + SZ_2K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_1610_GPIO_BANK3, .flags = IORESOURCE_IRQ, }, }; static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = { .bank_width = 16, .regs = 
&omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio3 = { .name = "omap_gpio", .id = 3, .dev = { .platform_data = &omap16xx_gpio3_config, }, .num_resources = ARRAY_SIZE(omap16xx_gpio3_resources), .resource = omap16xx_gpio3_resources, }; /* gpio4 */ static struct __initdata resource omap16xx_gpio4_resources[] = { { .start = OMAP1610_GPIO4_BASE, .end = OMAP1610_GPIO4_BASE + SZ_2K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_1610_GPIO_BANK4, .flags = IORESOURCE_IRQ, }, }; static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = { .bank_width = 16, .regs = &omap16xx_gpio_regs, }; static struct platform_device omap16xx_gpio4 = { .name = "omap_gpio", .id = 4, .dev = { .platform_data = &omap16xx_gpio4_config, }, .num_resources = ARRAY_SIZE(omap16xx_gpio4_resources), .resource = omap16xx_gpio4_resources, }; static struct __initdata platform_device * omap16xx_gpio_dev[] = { &omap16xx_mpu_gpio, &omap16xx_gpio1, &omap16xx_gpio2, &omap16xx_gpio3, &omap16xx_gpio4, }; /* * omap16xx_gpio_init needs to be done before * machine_init functions access gpio APIs. * Hence omap16xx_gpio_init is a postcore_initcall. */ static int __init omap16xx_gpio_init(void) { int i; void __iomem *base; struct resource *res; struct platform_device *pdev; struct omap_gpio_platform_data *pdata; if (!cpu_is_omap16xx()) return -EINVAL; /* * Enable system clock for GPIO module. * The CAM_CLK_CTRL *is* really the right place. 
*/ omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04, ULPD_CAM_CLK_CTRL); for (i = 0; i < ARRAY_SIZE(omap16xx_gpio_dev); i++) { pdev = omap16xx_gpio_dev[i]; pdata = pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (unlikely(!res)) { dev_err(&pdev->dev, "Invalid mem resource.\n"); return -ENODEV; } base = ioremap(res->start, resource_size(res)); if (unlikely(!base)) { dev_err(&pdev->dev, "ioremap failed.\n"); return -ENOMEM; } __raw_writel(SYSCONFIG_WORD, base + OMAP1610_GPIO_SYSCONFIG); iounmap(base); platform_device_register(omap16xx_gpio_dev[i]); } return 0; } postcore_initcall(omap16xx_gpio_init);
gpl-2.0
Buckmarble/Elite_Kernel
net/sunrpc/backchannel_rqst.c
3642
9026
/****************************************************************************** (c) 2007 Network Appliance, Inc. All Rights Reserved. (c) 2009 NetApp. All Rights Reserved. NetApp provides this source code under the GPL v2 License. The GPL v2 license is available at http://opensource.org/licenses/gpl-license.php. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ #include <linux/tcp.h> #include <linux/slab.h> #include <linux/sunrpc/xprt.h> #ifdef RPC_DEBUG #define RPCDBG_FACILITY RPCDBG_TRANS #endif #if defined(CONFIG_NFS_V4_1) /* * Helper routines that track the number of preallocation elements * on the transport. */ static inline int xprt_need_to_requeue(struct rpc_xprt *xprt) { return xprt->bc_alloc_count > 0; } static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n) { xprt->bc_alloc_count += n; } static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n) { return xprt->bc_alloc_count -= n; } /* * Free the preallocated rpc_rqst structure and the memory * buffers hanging off of it. 
*/ static void xprt_free_allocation(struct rpc_rqst *req) { struct xdr_buf *xbufp; dprintk("RPC: free allocations for req= %p\n", req); BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); xbufp = &req->rq_private_buf; free_page((unsigned long)xbufp->head[0].iov_base); xbufp = &req->rq_snd_buf; free_page((unsigned long)xbufp->head[0].iov_base); list_del(&req->rq_bc_pa_list); kfree(req); } /* * Preallocate up to min_reqs structures and related buffers for use * by the backchannel. This function can be called multiple times * when creating new sessions that use the same rpc_xprt. The * preallocated buffers are added to the pool of resources used by * the rpc_xprt. Anyone of these resources may be used used by an * incoming callback request. It's up to the higher levels in the * stack to enforce that the maximum number of session slots is not * being exceeded. * * Some callback arguments can be large. For example, a pNFS server * using multiple deviceids. The list can be unbound, but the client * has the ability to tell the server the maximum size of the callback * requests. Each deviceID is 16 bytes, so allocate one page * for the arguments to have enough room to receive a number of these * deviceIDs. The NFS client indicates to the pNFS server that its * callback requests can be up to 4096 bytes in size. */ int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs) { struct page *page_rcv = NULL, *page_snd = NULL; struct xdr_buf *xbufp = NULL; struct rpc_rqst *req, *tmp; struct list_head tmp_list; int i; dprintk("RPC: setup backchannel transport\n"); /* * We use a temporary list to keep track of the preallocated * buffers. Once we're done building the list we splice it * into the backchannel preallocation list off of the rpc_xprt * struct. This helps minimize the amount of time the list * lock is held on the rpc_xprt struct. It also makes cleanup * easier in case of memory allocation errors. 
*/ INIT_LIST_HEAD(&tmp_list); for (i = 0; i < min_reqs; i++) { /* Pre-allocate one backchannel rpc_rqst */ req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); if (req == NULL) { printk(KERN_ERR "Failed to create bc rpc_rqst\n"); goto out_free; } /* Add the allocated buffer to the tmp list */ dprintk("RPC: adding req= %p\n", req); list_add(&req->rq_bc_pa_list, &tmp_list); req->rq_xprt = xprt; INIT_LIST_HEAD(&req->rq_list); INIT_LIST_HEAD(&req->rq_bc_list); /* Preallocate one XDR receive buffer */ page_rcv = alloc_page(GFP_KERNEL); if (page_rcv == NULL) { printk(KERN_ERR "Failed to create bc receive xbuf\n"); goto out_free; } xbufp = &req->rq_rcv_buf; xbufp->head[0].iov_base = page_address(page_rcv); xbufp->head[0].iov_len = PAGE_SIZE; xbufp->tail[0].iov_base = NULL; xbufp->tail[0].iov_len = 0; xbufp->page_len = 0; xbufp->len = PAGE_SIZE; xbufp->buflen = PAGE_SIZE; /* Preallocate one XDR send buffer */ page_snd = alloc_page(GFP_KERNEL); if (page_snd == NULL) { printk(KERN_ERR "Failed to create bc snd xbuf\n"); goto out_free; } xbufp = &req->rq_snd_buf; xbufp->head[0].iov_base = page_address(page_snd); xbufp->head[0].iov_len = 0; xbufp->tail[0].iov_base = NULL; xbufp->tail[0].iov_len = 0; xbufp->page_len = 0; xbufp->len = 0; xbufp->buflen = PAGE_SIZE; } /* * Add the temporary list to the backchannel preallocation list */ spin_lock_bh(&xprt->bc_pa_lock); list_splice(&tmp_list, &xprt->bc_pa_list); xprt_inc_alloc_count(xprt, min_reqs); spin_unlock_bh(&xprt->bc_pa_lock); dprintk("RPC: setup backchannel transport done\n"); return 0; out_free: /* * Memory allocation failed, free the temporary list */ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) xprt_free_allocation(req); dprintk("RPC: setup backchannel transport failed\n"); return -1; } EXPORT_SYMBOL(xprt_setup_backchannel); /* * Destroys the backchannel preallocated structures. 
* Since these structures may have been allocated by multiple calls * to xprt_setup_backchannel, we only destroy up to the maximum number * of reqs specified by the caller. * @xprt: the transport holding the preallocated strucures * @max_reqs the maximum number of preallocated structures to destroy */ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs) { struct rpc_rqst *req = NULL, *tmp = NULL; dprintk("RPC: destroy backchannel transport\n"); BUG_ON(max_reqs == 0); spin_lock_bh(&xprt->bc_pa_lock); xprt_dec_alloc_count(xprt, max_reqs); list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { dprintk("RPC: req=%p\n", req); xprt_free_allocation(req); if (--max_reqs == 0) break; } spin_unlock_bh(&xprt->bc_pa_lock); dprintk("RPC: backchannel list empty= %s\n", list_empty(&xprt->bc_pa_list) ? "true" : "false"); } EXPORT_SYMBOL(xprt_destroy_backchannel); /* * One or more rpc_rqst structure have been preallocated during the * backchannel setup. Buffer space for the send and private XDR buffers * has been preallocated as well. Use xprt_alloc_bc_request to allocate * to this request. Use xprt_free_bc_request to return it. * * We know that we're called in soft interrupt context, grab the spin_lock * since there is no need to grab the bottom half spin_lock. * * Return an available rpc_rqst, otherwise NULL if non are available. 
*/ struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt) { struct rpc_rqst *req; dprintk("RPC: allocate a backchannel request\n"); spin_lock(&xprt->bc_pa_lock); if (!list_empty(&xprt->bc_pa_list)) { req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, rq_bc_pa_list); list_del(&req->rq_bc_pa_list); } else { req = NULL; } spin_unlock(&xprt->bc_pa_lock); if (req != NULL) { set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); req->rq_reply_bytes_recvd = 0; req->rq_bytes_sent = 0; memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(req->rq_private_buf)); } dprintk("RPC: backchannel req=%p\n", req); return req; } /* * Return the preallocated rpc_rqst structure and XDR buffers * associated with this rpc_task. */ void xprt_free_bc_request(struct rpc_rqst *req) { struct rpc_xprt *xprt = req->rq_xprt; dprintk("RPC: free backchannel req=%p\n", req); smp_mb__before_clear_bit(); BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); smp_mb__after_clear_bit(); if (!xprt_need_to_requeue(xprt)) { /* * The last remaining session was destroyed while this * entry was in use. Free the entry and don't attempt * to add back to the list because there is no need to * have anymore preallocated entries. */ dprintk("RPC: Last session removed req=%p\n", req); xprt_free_allocation(req); return; } /* * Return it to the list of preallocations so that it * may be reused by a new callback request. */ spin_lock_bh(&xprt->bc_pa_lock); list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list); spin_unlock_bh(&xprt->bc_pa_lock); } #endif /* CONFIG_NFS_V4_1 */
gpl-2.0
patjak/linux-stable
arch/x86/power/hibernate_32.c
3642
3990
/* * Hibernation support specific for i386 - temporary page tables * * Distribute under GPLv2 * * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl> */ #include <linux/gfp.h> #include <linux/suspend.h> #include <linux/bootmem.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmzone.h> /* Defined in hibernate_asm_32.S */ extern int restore_image(void); /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; /* Pointer to the temporary resume page tables */ pgd_t *resume_pg_dir; /* The following three functions are based on the analogous code in * arch/x86/mm/init_32.c */ /* * Create a middle page table on a resume-safe page and put a pointer to it in * the given global directory entry. This only returns the gd entry * in non-PAE compilation mode, since the middle layer is folded. */ static pmd_t *resume_one_md_table_init(pgd_t *pgd) { pud_t *pud; pmd_t *pmd_table; #ifdef CONFIG_X86_PAE pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC); if (!pmd_table) return NULL; set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); BUG_ON(pmd_table != pmd_offset(pud, 0)); #else pud = pud_offset(pgd, 0); pmd_table = pmd_offset(pud, 0); #endif return pmd_table; } /* * Create a page table on a resume-safe page and place a pointer to it in * a middle page directory entry. */ static pte_t *resume_one_page_table_init(pmd_t *pmd) { if (pmd_none(*pmd)) { pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC); if (!page_table) return NULL; set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); BUG_ON(page_table != pte_offset_kernel(pmd, 0)); return page_table; } return pte_offset_kernel(pmd, 0); } /* * This maps the physical memory to kernel virtual address space, a total * of max_low_pfn pages, by creating page tables starting from address * PAGE_OFFSET. The page tables are allocated out of resume-safe pages. 
*/ static int resume_physical_mapping_init(pgd_t *pgd_base) { unsigned long pfn; pgd_t *pgd; pmd_t *pmd; pte_t *pte; int pgd_idx, pmd_idx; pgd_idx = pgd_index(PAGE_OFFSET); pgd = pgd_base + pgd_idx; pfn = 0; for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { pmd = resume_one_md_table_init(pgd); if (!pmd) return -ENOMEM; if (pfn >= max_low_pfn) continue; for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { if (pfn >= max_low_pfn) break; /* Map with big pages if possible, otherwise create * normal page tables. * NOTE: We can mark everything as executable here */ if (cpu_has_pse) { set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC)); pfn += PTRS_PER_PTE; } else { pte_t *max_pte; pte = resume_one_page_table_init(pmd); if (!pte) return -ENOMEM; max_pte = pte + PTRS_PER_PTE; for (; pte < max_pte; pte++, pfn++) { if (pfn >= max_low_pfn) break; set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); } } } } return 0; } static inline void resume_init_first_level_page_table(pgd_t *pg_dir) { #ifdef CONFIG_X86_PAE int i; /* Init entries of the first-level page table to the zero page */ for (i = 0; i < PTRS_PER_PGD; i++) set_pgd(pg_dir + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT)); #endif } int swsusp_arch_resume(void) { int error; resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!resume_pg_dir) return -ENOMEM; resume_init_first_level_page_table(resume_pg_dir); error = resume_physical_mapping_init(resume_pg_dir); if (error) return error; /* We have got enough memory and from now on we cannot recover */ restore_image(); return 0; } /* * pfn_is_nosave - check if given pfn is in the 'nosave' section */ int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); }
gpl-2.0
ernestj/pitft
drivers/media/usb/gspca/spca501.c
4410
52593
/* * SPCA501 chip based cameras initialization data * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "spca501" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/SPCA501 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ unsigned short contrast; __u8 brightness; __u8 colors; __u8 blue_balance; __u8 red_balance; char subtype; #define Arowana300KCMOSCamera 0 #define IntelCreateAndShare 1 #define KodakDVC325 2 #define MystFromOriUnknownCamera 3 #define SmileIntlCamera 4 #define ThreeComHomeConnectLite 5 #define ViewQuestM318B 6 }; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {320, 240, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {640, 480, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; #define SPCA50X_REG_USB 0x2 /* spca505 501 */ /* * Data to initialize a SPCA501. From a capture file provided by Bill Roehl * With SPCA501 chip description */ #define CCDSP_SET /* set CCDSP parameters */ #define TG_SET /* set time generator set */ #undef DSPWIN_SET /* set DSP windows parameters */ #undef ALTER_GAMA /* Set alternate set to YUV transform coeffs. 
*/ #define SPCA501_SNAPBIT 0x80 #define SPCA501_SNAPCTRL 0x10 /* Frame packet header offsets for the spca501 */ #define SPCA501_OFFSET_GPIO 1 #define SPCA501_OFFSET_TYPE 2 #define SPCA501_OFFSET_TURN3A 3 #define SPCA501_OFFSET_FRAMSEQ 4 #define SPCA501_OFFSET_COMPRESS 5 #define SPCA501_OFFSET_QUANT 6 #define SPCA501_OFFSET_QUANT2 7 #define SPCA501_OFFSET_DATA 8 #define SPCA501_PROP_COMP_ENABLE(d) ((d) & 1) #define SPCA501_PROP_SNAP(d) ((d) & 0x40) #define SPCA501_PROP_SNAP_CTRL(d) ((d) & 0x10) #define SPCA501_PROP_COMP_THRESH(d) (((d) & 0x0e) >> 1) #define SPCA501_PROP_COMP_QUANT(d) (((d) & 0x70) >> 4) /* SPCA501 CCDSP control */ #define SPCA501_REG_CCDSP 0x01 /* SPCA501 control/status registers */ #define SPCA501_REG_CTLRL 0x02 /* registers for color correction and YUV transformation */ #define SPCA501_A11 0x08 #define SPCA501_A12 0x09 #define SPCA501_A13 0x0A #define SPCA501_A21 0x0B #define SPCA501_A22 0x0C #define SPCA501_A23 0x0D #define SPCA501_A31 0x0E #define SPCA501_A32 0x0F #define SPCA501_A33 0x10 /* Data for video camera initialization before capturing */ static const __u16 spca501_open_data[][3] = { /* bmRequest,value,index */ {0x2, 0x50, 0x00}, /* C/S enable soft reset */ {0x2, 0x40, 0x00}, /* C/S disable soft reset */ {0x2, 0x02, 0x05}, /* C/S general purpose I/O data */ {0x2, 0x03, 0x05}, /* C/S general purpose I/O data */ #ifdef CCDSP_SET {0x1, 0x38, 0x01}, /* CCDSP options */ {0x1, 0x05, 0x02}, /* CCDSP Optical black level for user settings */ {0x1, 0xC0, 0x03}, /* CCDSP Optical black settings */ {0x1, 0x67, 0x07}, {0x1, 0x63, 0x3f}, /* CCDSP CCD gamma enable */ {0x1, 0x03, 0x56}, /* Add gamma correction */ {0x1, 0xFF, 0x15}, /* CCDSP High luminance for white balance */ {0x1, 0x01, 0x16}, /* CCDSP Low luminance for white balance */ /* Color correction and RGB-to-YUV transformation coefficients changing */ #ifdef ALTER_GAMA {0x0, 0x00, 0x08}, /* A11 */ {0x0, 0x00, 0x09}, /* A12 */ {0x0, 0x90, 0x0A}, /* A13 */ {0x0, 0x12, 0x0B}, /* A21 */ {0x0, 
0x00, 0x0C}, /* A22 */ {0x0, 0x00, 0x0D}, /* A23 */ {0x0, 0x00, 0x0E}, /* A31 */ {0x0, 0x02, 0x0F}, /* A32 */ {0x0, 0x00, 0x10}, /* A33 */ #else {0x1, 0x2a, 0x08}, /* A11 0x31 */ {0x1, 0xf8, 0x09}, /* A12 f8 */ {0x1, 0xf8, 0x0A}, /* A13 f8 */ {0x1, 0xf8, 0x0B}, /* A21 f8 */ {0x1, 0x14, 0x0C}, /* A22 0x14 */ {0x1, 0xf8, 0x0D}, /* A23 f8 */ {0x1, 0xf8, 0x0E}, /* A31 f8 */ {0x1, 0xf8, 0x0F}, /* A32 f8 */ {0x1, 0x20, 0x10}, /* A33 0x20 */ #endif {0x1, 0x00, 0x11}, /* R offset */ {0x1, 0x00, 0x12}, /* G offset */ {0x1, 0x00, 0x13}, /* B offset */ {0x1, 0x00, 0x14}, /* GB offset */ #endif #ifdef TG_SET /* Time generator manipulations */ {0x0, 0xfc, 0x0}, /* Set up high bits of shutter speed */ {0x0, 0x01, 0x1}, /* Set up low bits of shutter speed */ {0x0, 0xe4, 0x04}, /* DCLK*2 clock phase adjustment */ {0x0, 0x08, 0x05}, /* ADCK phase adjustment, inv. ext. VB */ {0x0, 0x03, 0x06}, /* FR phase adjustment */ {0x0, 0x01, 0x07}, /* FCDS phase adjustment */ {0x0, 0x39, 0x08}, /* FS phase adjustment */ {0x0, 0x88, 0x0a}, /* FH1 phase and delay adjustment */ {0x0, 0x03, 0x0f}, /* pixel identification */ {0x0, 0x00, 0x11}, /* clock source selection (default) */ /*VERY strange manipulations with * select DMCLP or OBPX to be ADCLP output (0x0C) * OPB always toggle or not (0x0D) but they allow * us to set up brightness */ {0x0, 0x01, 0x0c}, {0x0, 0xe0, 0x0d}, /* Done */ #endif #ifdef DSPWIN_SET {0x1, 0xa0, 0x01}, /* Setting image processing parameters */ {0x1, 0x1c, 0x17}, /* Changing Windows positions X1 */ {0x1, 0xe2, 0x19}, /* X2 */ {0x1, 0x1c, 0x1b}, /* X3 */ {0x1, 0xe2, 0x1d}, /* X4 */ {0x1, 0x5f, 0x1f}, /* X5 */ {0x1, 0x32, 0x20}, /* Y5 */ {0x1, 0x01, 0x10}, /* Changing A33 */ #endif {0x2, 0x204a, 0x07},/* Setting video compression & resolution 160x120 */ {0x2, 0x94, 0x06}, /* Setting video no compression */ {} }; /* The SPCAxxx docs from Sunplus document these values in tables, one table per register number. 
In the data below, dmRequest is the register number, index is the Addr, and value is a combination of Bit values. Bit Value (hex) 0 01 1 02 2 04 3 08 4 10 5 20 6 40 7 80 */ /* Data for chip initialization (set default values) */ static const __u16 spca501_init_data[][3] = { /* Set all the values to powerup defaults */ /* bmRequest,value,index */ {0x0, 0xAA, 0x00}, {0x0, 0x02, 0x01}, {0x0, 0x01, 0x02}, {0x0, 0x02, 0x03}, {0x0, 0xCE, 0x04}, {0x0, 0x00, 0x05}, {0x0, 0x00, 0x06}, {0x0, 0x00, 0x07}, {0x0, 0x00, 0x08}, {0x0, 0x00, 0x09}, {0x0, 0x90, 0x0A}, {0x0, 0x12, 0x0B}, {0x0, 0x00, 0x0C}, {0x0, 0x00, 0x0D}, {0x0, 0x00, 0x0E}, {0x0, 0x02, 0x0F}, {0x0, 0x00, 0x10}, {0x0, 0x00, 0x11}, {0x0, 0x00, 0x12}, {0x0, 0x00, 0x13}, {0x0, 0x00, 0x14}, {0x0, 0x00, 0x15}, {0x0, 0x00, 0x16}, {0x0, 0x00, 0x17}, {0x0, 0x00, 0x18}, {0x0, 0x00, 0x19}, {0x0, 0x00, 0x1A}, {0x0, 0x00, 0x1B}, {0x0, 0x00, 0x1C}, {0x0, 0x00, 0x1D}, {0x0, 0x00, 0x1E}, {0x0, 0x00, 0x1F}, {0x0, 0x00, 0x20}, {0x0, 0x00, 0x21}, {0x0, 0x00, 0x22}, {0x0, 0x00, 0x23}, {0x0, 0x00, 0x24}, {0x0, 0x00, 0x25}, {0x0, 0x00, 0x26}, {0x0, 0x00, 0x27}, {0x0, 0x00, 0x28}, {0x0, 0x00, 0x29}, {0x0, 0x00, 0x2A}, {0x0, 0x00, 0x2B}, {0x0, 0x00, 0x2C}, {0x0, 0x00, 0x2D}, {0x0, 0x00, 0x2E}, {0x0, 0x00, 0x2F}, {0x0, 0x00, 0x30}, {0x0, 0x00, 0x31}, {0x0, 0x00, 0x32}, {0x0, 0x00, 0x33}, {0x0, 0x00, 0x34}, {0x0, 0x00, 0x35}, {0x0, 0x00, 0x36}, {0x0, 0x00, 0x37}, {0x0, 0x00, 0x38}, {0x0, 0x00, 0x39}, {0x0, 0x00, 0x3A}, {0x0, 0x00, 0x3B}, {0x0, 0x00, 0x3C}, {0x0, 0x00, 0x3D}, {0x0, 0x00, 0x3E}, {0x0, 0x00, 0x3F}, {0x0, 0x00, 0x40}, {0x0, 0x00, 0x41}, {0x0, 0x00, 0x42}, {0x0, 0x00, 0x43}, {0x0, 0x00, 0x44}, {0x0, 0x00, 0x45}, {0x0, 0x00, 0x46}, {0x0, 0x00, 0x47}, {0x0, 0x00, 0x48}, {0x0, 0x00, 0x49}, {0x0, 0x00, 0x4A}, {0x0, 0x00, 0x4B}, {0x0, 0x00, 0x4C}, {0x0, 0x00, 0x4D}, {0x0, 0x00, 0x4E}, {0x0, 0x00, 0x4F}, {0x0, 0x00, 0x50}, {0x0, 0x00, 0x51}, {0x0, 0x00, 0x52}, {0x0, 0x00, 0x53}, {0x0, 0x00, 0x54}, {0x0, 0x00, 0x55}, {0x0, 0x00, 
0x56}, {0x0, 0x00, 0x57}, {0x0, 0x00, 0x58}, {0x0, 0x00, 0x59}, {0x0, 0x00, 0x5A}, {0x0, 0x00, 0x5B}, {0x0, 0x00, 0x5C}, {0x0, 0x00, 0x5D}, {0x0, 0x00, 0x5E}, {0x0, 0x00, 0x5F}, {0x0, 0x00, 0x60}, {0x0, 0x00, 0x61}, {0x0, 0x00, 0x62}, {0x0, 0x00, 0x63}, {0x0, 0x00, 0x64}, {0x0, 0x00, 0x65}, {0x0, 0x00, 0x66}, {0x0, 0x00, 0x67}, {0x0, 0x00, 0x68}, {0x0, 0x00, 0x69}, {0x0, 0x00, 0x6A}, {0x0, 0x00, 0x6B}, {0x0, 0x00, 0x6C}, {0x0, 0x00, 0x6D}, {0x0, 0x00, 0x6E}, {0x0, 0x00, 0x6F}, {0x0, 0x00, 0x70}, {0x0, 0x00, 0x71}, {0x0, 0x00, 0x72}, {0x0, 0x00, 0x73}, {0x0, 0x00, 0x74}, {0x0, 0x00, 0x75}, {0x0, 0x00, 0x76}, {0x0, 0x00, 0x77}, {0x0, 0x00, 0x78}, {0x0, 0x00, 0x79}, {0x0, 0x00, 0x7A}, {0x0, 0x00, 0x7B}, {0x0, 0x00, 0x7C}, {0x0, 0x00, 0x7D}, {0x0, 0x00, 0x7E}, {0x0, 0x00, 0x7F}, {0x0, 0x00, 0x80}, {0x0, 0x00, 0x81}, {0x0, 0x00, 0x82}, {0x0, 0x00, 0x83}, {0x0, 0x00, 0x84}, {0x0, 0x00, 0x85}, {0x0, 0x00, 0x86}, {0x0, 0x00, 0x87}, {0x0, 0x00, 0x88}, {0x0, 0x00, 0x89}, {0x0, 0x00, 0x8A}, {0x0, 0x00, 0x8B}, {0x0, 0x00, 0x8C}, {0x0, 0x00, 0x8D}, {0x0, 0x00, 0x8E}, {0x0, 0x00, 0x8F}, {0x0, 0x00, 0x90}, {0x0, 0x00, 0x91}, {0x0, 0x00, 0x92}, {0x0, 0x00, 0x93}, {0x0, 0x00, 0x94}, {0x0, 0x00, 0x95}, {0x0, 0x00, 0x96}, {0x0, 0x00, 0x97}, {0x0, 0x00, 0x98}, {0x0, 0x00, 0x99}, {0x0, 0x00, 0x9A}, {0x0, 0x00, 0x9B}, {0x0, 0x00, 0x9C}, {0x0, 0x00, 0x9D}, {0x0, 0x00, 0x9E}, {0x0, 0x00, 0x9F}, {0x0, 0x00, 0xA0}, {0x0, 0x00, 0xA1}, {0x0, 0x00, 0xA2}, {0x0, 0x00, 0xA3}, {0x0, 0x00, 0xA4}, {0x0, 0x00, 0xA5}, {0x0, 0x00, 0xA6}, {0x0, 0x00, 0xA7}, {0x0, 0x00, 0xA8}, {0x0, 0x00, 0xA9}, {0x0, 0x00, 0xAA}, {0x0, 0x00, 0xAB}, {0x0, 0x00, 0xAC}, {0x0, 0x00, 0xAD}, {0x0, 0x00, 0xAE}, {0x0, 0x00, 0xAF}, {0x0, 0x00, 0xB0}, {0x0, 0x00, 0xB1}, {0x0, 0x00, 0xB2}, {0x0, 0x00, 0xB3}, {0x0, 0x00, 0xB4}, {0x0, 0x00, 0xB5}, {0x0, 0x00, 0xB6}, {0x0, 0x00, 0xB7}, {0x0, 0x00, 0xB8}, {0x0, 0x00, 0xB9}, {0x0, 0x00, 0xBA}, {0x0, 0x00, 0xBB}, {0x0, 0x00, 0xBC}, {0x0, 0x00, 0xBD}, {0x0, 0x00, 0xBE}, {0x0, 0x00, 
0xBF}, {0x0, 0x00, 0xC0}, {0x0, 0x00, 0xC1}, {0x0, 0x00, 0xC2}, {0x0, 0x00, 0xC3}, {0x0, 0x00, 0xC4}, {0x0, 0x00, 0xC5}, {0x0, 0x00, 0xC6}, {0x0, 0x00, 0xC7}, {0x0, 0x00, 0xC8}, {0x0, 0x00, 0xC9}, {0x0, 0x00, 0xCA}, {0x0, 0x00, 0xCB}, {0x0, 0x00, 0xCC}, {0x1, 0xF4, 0x00}, {0x1, 0x38, 0x01}, {0x1, 0x40, 0x02}, {0x1, 0x0A, 0x03}, {0x1, 0x40, 0x04}, {0x1, 0x40, 0x05}, {0x1, 0x40, 0x06}, {0x1, 0x67, 0x07}, {0x1, 0x31, 0x08}, {0x1, 0x00, 0x09}, {0x1, 0x00, 0x0A}, {0x1, 0x00, 0x0B}, {0x1, 0x14, 0x0C}, {0x1, 0x00, 0x0D}, {0x1, 0x00, 0x0E}, {0x1, 0x00, 0x0F}, {0x1, 0x1E, 0x10}, {0x1, 0x00, 0x11}, {0x1, 0x00, 0x12}, {0x1, 0x00, 0x13}, {0x1, 0x00, 0x14}, {0x1, 0xFF, 0x15}, {0x1, 0x01, 0x16}, {0x1, 0x32, 0x17}, {0x1, 0x23, 0x18}, {0x1, 0xCE, 0x19}, {0x1, 0x23, 0x1A}, {0x1, 0x32, 0x1B}, {0x1, 0x8D, 0x1C}, {0x1, 0xCE, 0x1D}, {0x1, 0x8D, 0x1E}, {0x1, 0x00, 0x1F}, {0x1, 0x00, 0x20}, {0x1, 0xFF, 0x3E}, {0x1, 0x02, 0x3F}, {0x1, 0x00, 0x40}, {0x1, 0x00, 0x41}, {0x1, 0x00, 0x42}, {0x1, 0x00, 0x43}, {0x1, 0x00, 0x44}, {0x1, 0x00, 0x45}, {0x1, 0x00, 0x46}, {0x1, 0x00, 0x47}, {0x1, 0x00, 0x48}, {0x1, 0x00, 0x49}, {0x1, 0x00, 0x4A}, {0x1, 0x00, 0x4B}, {0x1, 0x00, 0x4C}, {0x1, 0x00, 0x4D}, {0x1, 0x00, 0x4E}, {0x1, 0x00, 0x4F}, {0x1, 0x00, 0x50}, {0x1, 0x00, 0x51}, {0x1, 0x00, 0x52}, {0x1, 0x00, 0x53}, {0x1, 0x00, 0x54}, {0x1, 0x00, 0x55}, {0x1, 0x00, 0x56}, {0x1, 0x00, 0x57}, {0x1, 0x00, 0x58}, {0x1, 0x00, 0x59}, {0x1, 0x00, 0x5A}, {0x2, 0x03, 0x00}, {0x2, 0x00, 0x01}, {0x2, 0x00, 0x05}, {0x2, 0x00, 0x06}, {0x2, 0x00, 0x07}, {0x2, 0x00, 0x10}, {0x2, 0x00, 0x11}, /* Strange - looks like the 501 driver doesn't do anything * at insert time except read the EEPROM */ {} }; /* Data for video camera init before capture. * Capture and decoding by Colin Peart. * This is is for the 3com HomeConnect Lite which is spca501a based. 
*/ static const __u16 spca501_3com_open_data[][3] = { /* bmRequest,value,index */ {0x2, 0x0050, 0x0000}, /* C/S Enable TG soft reset, timing mode=010 */ {0x2, 0x0043, 0x0000}, /* C/S Disable TG soft reset, timing mode=010 */ {0x2, 0x0002, 0x0005}, /* C/S GPIO */ {0x2, 0x0003, 0x0005}, /* C/S GPIO */ #ifdef CCDSP_SET {0x1, 0x0020, 0x0001}, /* CCDSP Options */ {0x1, 0x0020, 0x0002}, /* CCDSP Black Level */ {0x1, 0x006e, 0x0007}, /* CCDSP Gamma options */ {0x1, 0x0090, 0x0015}, /* CCDSP Luminance Low */ {0x1, 0x00ff, 0x0016}, /* CCDSP Luminance High */ {0x1, 0x0003, 0x003F}, /* CCDSP Gamma correction toggle */ #ifdef ALTER_GAMMA {0x1, 0x0010, 0x0008}, /* CCDSP YUV A11 */ {0x1, 0x0000, 0x0009}, /* CCDSP YUV A12 */ {0x1, 0x0000, 0x000a}, /* CCDSP YUV A13 */ {0x1, 0x0000, 0x000b}, /* CCDSP YUV A21 */ {0x1, 0x0010, 0x000c}, /* CCDSP YUV A22 */ {0x1, 0x0000, 0x000d}, /* CCDSP YUV A23 */ {0x1, 0x0000, 0x000e}, /* CCDSP YUV A31 */ {0x1, 0x0000, 0x000f}, /* CCDSP YUV A32 */ {0x1, 0x0010, 0x0010}, /* CCDSP YUV A33 */ {0x1, 0x0000, 0x0011}, /* CCDSP R Offset */ {0x1, 0x0000, 0x0012}, /* CCDSP G Offset */ {0x1, 0x0001, 0x0013}, /* CCDSP B Offset */ {0x1, 0x0001, 0x0014}, /* CCDSP BG Offset */ {0x1, 0x003f, 0x00C1}, /* CCDSP Gamma Correction Enable */ #endif #endif #ifdef TG_SET {0x0, 0x00fc, 0x0000}, /* TG Shutter Speed High Bits */ {0x0, 0x0000, 0x0001}, /* TG Shutter Speed Low Bits */ {0x0, 0x00e4, 0x0004}, /* TG DCLK*2 Adjust */ {0x0, 0x0008, 0x0005}, /* TG ADCK Adjust */ {0x0, 0x0003, 0x0006}, /* TG FR Phase Adjust */ {0x0, 0x0001, 0x0007}, /* TG FCDS Phase Adjust */ {0x0, 0x0039, 0x0008}, /* TG FS Phase Adjust */ {0x0, 0x0088, 0x000a}, /* TG MH1 */ {0x0, 0x0003, 0x000f}, /* TG Pixel ID */ /* Like below, unexplained toglleing */ {0x0, 0x0080, 0x000c}, {0x0, 0x0000, 0x000d}, {0x0, 0x0080, 0x000c}, {0x0, 0x0004, 0x000d}, {0x0, 0x0000, 0x000c}, {0x0, 0x0000, 0x000d}, {0x0, 0x0040, 0x000c}, {0x0, 0x0017, 0x000d}, {0x0, 0x00c0, 0x000c}, {0x0, 0x0000, 0x000d}, {0x0, 0x0080, 
0x000c}, {0x0, 0x0006, 0x000d}, {0x0, 0x0080, 0x000c}, {0x0, 0x0004, 0x000d}, {0x0, 0x0002, 0x0003}, #endif #ifdef DSPWIN_SET {0x1, 0x001c, 0x0017}, /* CCDSP W1 Start X */ {0x1, 0x00e2, 0x0019}, /* CCDSP W2 Start X */ {0x1, 0x001c, 0x001b}, /* CCDSP W3 Start X */ {0x1, 0x00e2, 0x001d}, /* CCDSP W4 Start X */ {0x1, 0x00aa, 0x001f}, /* CCDSP W5 Start X */ {0x1, 0x0070, 0x0020}, /* CCDSP W5 Start Y */ #endif {0x0, 0x0001, 0x0010}, /* TG Start Clock */ /* {0x2, 0x006a, 0x0001}, * C/S Enable ISOSYNCH Packet Engine */ {0x2, 0x0068, 0x0001}, /* C/S Diable ISOSYNCH Packet Engine */ {0x2, 0x0000, 0x0005}, {0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */ {0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */ {0x2, 0x0002, 0x0005}, /* C/S GPIO */ {0x2, 0x0003, 0x0005}, /* C/S GPIO */ {0x2, 0x006a, 0x0001}, /* C/S Enable ISOSYNCH Packet Engine */ {} }; /* * Data used to initialize a SPCA501C with HV7131B sensor. * From a capture file taken with USBSnoop v 1.5 * I have a "SPCA501C pc camera chipset" manual by sunplus, but some * of the value meanings are obscure or simply "reserved". * to do list: * 1) Understand what every value means * 2) Understand why some values seem to appear more than once * 3) Write a small comment for each line of the following arrays. 
*/ static const __u16 spca501c_arowana_open_data[][3] = { /* bmRequest,value,index */ {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x01, 0x0006, 0x0011}, {0x01, 0x00ff, 0x0012}, {0x01, 0x0014, 0x0013}, {0x01, 0x0000, 0x0014}, {0x01, 0x0042, 0x0051}, {0x01, 0x0040, 0x0052}, {0x01, 0x0051, 0x0053}, {0x01, 0x0040, 0x0054}, {0x01, 0x0000, 0x0055}, {0x00, 0x0025, 0x0000}, {0x00, 0x0026, 0x0000}, {0x00, 0x0001, 0x0000}, {0x00, 0x0027, 0x0000}, {0x00, 0x008a, 0x0000}, {} }; static const __u16 spca501c_arowana_init_data[][3] = { /* bmRequest,value,index */ {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x01, 0x0006, 0x0011}, {0x01, 0x00ff, 0x0012}, {0x01, 0x0014, 0x0013}, {0x01, 0x0000, 0x0014}, {0x01, 0x0042, 0x0051}, {0x01, 0x0040, 0x0052}, {0x01, 0x0051, 0x0053}, {0x01, 0x0040, 0x0054}, {0x01, 0x0000, 0x0055}, {0x00, 0x0025, 0x0000}, {0x00, 0x0026, 0x0000}, {0x00, 0x0001, 0x0000}, {0x00, 0x0027, 0x0000}, {0x00, 0x008a, 0x0000}, {0x02, 0x0000, 0x0005}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, 
{0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 
0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 
0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0xfffd, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffea, 0x000c}, {0x01, 0xfff4, 0x000d}, {0x01, 0xfffc, 0x000e}, {0x01, 0xffe3, 0x000f}, {0x01, 0x001f, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0067, 0x0007}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 
0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x00c8, 0x0015}, {0x01, 0x0032, 0x0016}, {0x01, 0x0000, 0x0011}, {0x01, 0x0000, 0x0012}, {0x01, 0x0000, 0x0013}, {0x01, 0x000a, 0x0003}, {0x02, 0xc002, 0x0001}, {0x02, 0x0007, 0x0005}, {0x02, 0xc000, 0x0001}, {0x02, 0x0000, 0x0005}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 
0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 
0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 
0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x000f, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0xfffd, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffea, 0x000c}, {0x01, 0xfff4, 0x000d}, {0x01, 0xfffc, 0x000e}, {0x01, 0xffe3, 0x000f}, {0x01, 0x001f, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0067, 0x0007}, {0x01, 0x0042, 0x0051}, {0x01, 0x0051, 0x0053}, {0x01, 0x000a, 0x0003}, {0x02, 0xc002, 0x0001}, {0x02, 0x0007, 0x0005}, {0x02, 0xc000, 0x0001}, {0x02, 0x0000, 0x0005}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 
0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 
0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 
0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 
0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x001e, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0xfffd, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffea, 0x000c}, {0x01, 0xfff4, 0x000d}, {0x01, 0xfffc, 0x000e}, {0x01, 0xffe3, 0x000f}, {0x01, 0x001f, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0067, 0x0007}, {0x01, 0x0042, 0x0051}, {0x01, 0x0051, 0x0053}, {0x01, 0x000a, 0x0003}, {0x02, 0xc002, 0x0001}, {0x02, 0x0007, 0x0005}, {0x01, 0x0042, 0x0051}, {0x01, 0x0051, 0x0053}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x002d, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x02, 0xc000, 0x0001}, {0x02, 0x0000, 0x0005}, {} }; /* Unknown camera from Ori Usbid 0x0000:0x0000 */ /* Based on snoops from Ori Cohen */ static const __u16 spca501c_mysterious_open_data[][3] = { {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, /* DSP Registers */ {0x01, 0x0016, 0x0011}, /* RGB offset */ {0x01, 0x0000, 0x0012}, {0x01, 0x0006, 0x0013}, {0x01, 0x0078, 0x0051}, {0x01, 0x0040, 0x0052}, {0x01, 0x0046, 0x0053}, {0x01, 0x0040, 0x0054}, {0x00, 0x0025, 0x0000}, /* {0x00, 0x0000, 0x0000 }, */ /* Part 2 */ /* TG Registers */ {0x00, 0x0026, 0x0000}, {0x00, 0x0001, 0x0000}, {0x00, 0x0027, 0x0000}, {0x00, 0x008a, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 
0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {} }; /* Based on snoops from Ori Cohen */ static const __u16 spca501c_mysterious_init_data[][3] = { /* Part 3 */ /* TG registers */ /* {0x00, 0x0000, 0x0000}, */ {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x0006, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0001, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, /* 640 */ {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, /* 480 */ {0x00, 0x0000, 0x0024}, /* Offset H hight */ {0x00, 0x00d3, 0x0025}, /* low */ {0x00, 0x0000, 0x0026}, /* Offset V */ {0x00, 0x000d, 0x0027}, /* low */ {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, /* DSP Registers */ {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, /* Level Calc bit7 ->1 Auto */ {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x000f, 0x0008}, /* A11 Color correction coeff */ {0x01, 0x002d, 0x0009}, /* A12 */ {0x01, 0x0005, 0x000a}, /* A13 */ {0x01, 0x0023, 0x000b}, /* A21 */ {0x01, 0x00e0, 0x000c}, /* A22 */ {0x01, 0x00fd, 0x000d}, /* A23 */ {0x01, 0x00f4, 0x000e}, /* A31 */ {0x01, 0x00e4, 0x000f}, /* A32 */ {0x01, 0x0028, 0x0010}, /* A33 */ {0x01, 0x00ff, 0x0015}, /* Reserved */ {0x01, 0x0001, 0x0016}, /* Reserved */ {0x01, 0x0032, 0x0017}, /* Win1 Start begin */ {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 
0x0020}, /* Win1 Start end */ {0x01, 0x00ff, 0x003e}, /* Reserved begin */ {0x01, 0x0002, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0003, 0x0056}, /* Reserved end */ {0x01, 0x0060, 0x0057}, /* Edge Gain */ {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, /* Edge Bandwidth */ {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x200a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc000, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, /* Part 4 */ {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0000, 0x0005}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x004e, 0x0000}, /* Part 5 */ {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x000f, 0x0008}, {0x01, 0x002d, 0x0009}, {0x01, 0x0005, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffe0, 0x000c}, {0x01, 0xfffd, 0x000d}, {0x01, 0xfff4, 0x000e}, {0x01, 0xffe4, 0x000f}, {0x01, 0x0028, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0066, 0x0007}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x00c8, 0x0015}, /* c8 Poids fort Luma */ {0x01, 0x0032, 0x0016}, /* 32 */ {0x01, 
0x0016, 0x0011}, /* R 00 */ {0x01, 0x0016, 0x0012}, /* G 00 */ {0x01, 0x0016, 0x0013}, /* B 00 */ {0x01, 0x000a, 0x0003}, {0x02, 0xc002, 0x0001}, {0x02, 0x0007, 0x0005}, {} }; static int reg_write(struct gspca_dev *gspca_dev, __u16 req, __u16 index, __u16 value) { int ret; struct usb_device *dev = gspca_dev->dev; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), req, USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); PDEBUG(D_USBO, "reg write: 0x%02x 0x%02x 0x%02x", req, index, value); if (ret < 0) pr_err("reg write: error %d\n", ret); return ret; } static int write_vector(struct gspca_dev *gspca_dev, const __u16 data[][3]) { int ret, i = 0; while (data[i][0] != 0 || data[i][1] != 0 || data[i][2] != 0) { ret = reg_write(gspca_dev, data[i][0], data[i][2], data[i][1]); if (ret < 0) { PERR("Reg write failed for 0x%02x,0x%02x,0x%02x", data[i][0], data[i][1], data[i][2]); return ret; } i++; } return 0; } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { reg_write(gspca_dev, SPCA501_REG_CCDSP, 0x12, val); } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { reg_write(gspca_dev, 0x00, 0x00, (val >> 8) & 0xff); reg_write(gspca_dev, 0x00, 0x01, val & 0xff); } static void setcolors(struct gspca_dev *gspca_dev, s32 val) { reg_write(gspca_dev, SPCA501_REG_CCDSP, 0x0c, val); } static void setblue_balance(struct gspca_dev *gspca_dev, s32 val) { reg_write(gspca_dev, SPCA501_REG_CCDSP, 0x11, val); } static void setred_balance(struct gspca_dev *gspca_dev, s32 val) { reg_write(gspca_dev, SPCA501_REG_CCDSP, 0x13, val); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); sd->subtype = id->driver_info; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct 
sd *sd = (struct sd *) gspca_dev; switch (sd->subtype) { case Arowana300KCMOSCamera: case SmileIntlCamera: /* Arowana 300k CMOS Camera data */ if (write_vector(gspca_dev, spca501c_arowana_init_data)) goto error; break; case MystFromOriUnknownCamera: /* Unknown Ori CMOS Camera data */ if (write_vector(gspca_dev, spca501c_mysterious_open_data)) goto error; break; default: /* generic spca501 init data */ if (write_vector(gspca_dev, spca501_init_data)) goto error; break; } PDEBUG(D_STREAM, "Initializing SPCA501 finished"); return 0; error: return -EINVAL; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode; switch (sd->subtype) { case ThreeComHomeConnectLite: /* Special handling for 3com data */ write_vector(gspca_dev, spca501_3com_open_data); break; case Arowana300KCMOSCamera: case SmileIntlCamera: /* Arowana 300k CMOS Camera data */ write_vector(gspca_dev, spca501c_arowana_open_data); break; case MystFromOriUnknownCamera: /* Unknown CMOS Camera data */ write_vector(gspca_dev, spca501c_mysterious_init_data); break; default: /* Generic 501 open data */ write_vector(gspca_dev, spca501_open_data); } /* memorize the wanted pixel format */ mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; /* Enable ISO packet machine CTRL reg=2, * index=1 bitmask=0x2 (bit ordinal 1) */ reg_write(gspca_dev, SPCA50X_REG_USB, 0x6, 0x94); switch (mode) { case 0: /* 640x480 */ reg_write(gspca_dev, SPCA50X_REG_USB, 0x07, 0x004a); break; case 1: /* 320x240 */ reg_write(gspca_dev, SPCA50X_REG_USB, 0x07, 0x104a); break; default: /* case 2: * 160x120 */ reg_write(gspca_dev, SPCA50X_REG_USB, 0x07, 0x204a); break; } reg_write(gspca_dev, SPCA501_REG_CTLRL, 0x01, 0x02); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { /* Disable ISO packet * machine CTRL reg=2, index=1 bitmask=0x0 (bit ordinal 1) */ reg_write(gspca_dev, SPCA501_REG_CTLRL, 0x01, 0x00); } /* called on streamoff with alt 0 and on disconnect */ static void 
sd_stop0(struct gspca_dev *gspca_dev) { if (!gspca_dev->present) return; reg_write(gspca_dev, SPCA501_REG_CTLRL, 0x05, 0x00); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { switch (data[0]) { case 0: /* start of frame */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); data += SPCA501_OFFSET_DATA; len -= SPCA501_OFFSET_DATA; gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); return; case 0xff: /* drop */ /* gspca_dev->last_packet_type = DISCARD_PACKET; */ return; } data++; len--; gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_BLUE_BALANCE: setblue_balance(gspca_dev, ctrl->val); break; case V4L2_CID_RED_BALANCE: setred_balance(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 5); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 127, 1, 0); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 64725, 1, 64725); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 63, 1, 20); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 127, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc 
sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325}, {USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera}, {USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite}, {USB_DEVICE(0x0733, 0x0401), .driver_info = IntelCreateAndShare}, {USB_DEVICE(0x0733, 0x0402), .driver_info = ViewQuestM318B}, {USB_DEVICE(0x1776, 0x501c), .driver_info = Arowana300KCMOSCamera}, {USB_DEVICE(0x0000, 0x0000), .driver_info = MystFromOriUnknownCamera}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
CoolDevelopment/MoshKernel-amami
arch/powerpc/platforms/powermac/cpufreq_32.c
4410
18732
/* * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> * Copyright (C) 2004 John Steele Scott <toojays@toojays.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * TODO: Need a big cleanup here. Basically, we need to have different * cpufreq_driver structures for the different type of HW instead of the * current mess. We also need to better deal with the detection of the * type of machine. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/cpufreq.h> #include <linux/init.h> #include <linux/device.h> #include <linux/hardirq.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/pmac_feature.h> #include <asm/mmu_context.h> #include <asm/sections.h> #include <asm/cputable.h> #include <asm/time.h> #include <asm/mpic.h> #include <asm/keylargo.h> #include <asm/switch_to.h> /* WARNING !!! This will cause calibrate_delay() to be called, * but this is an __init function ! 
So you MUST go edit * init/main.c to make it non-init before enabling DEBUG_FREQ */ #undef DEBUG_FREQ extern void low_choose_7447a_dfs(int dfs); extern void low_choose_750fx_pll(int pll); extern void low_sleep_handler(void); /* * Currently, PowerMac cpufreq supports only high & low frequencies * that are set by the firmware */ static unsigned int low_freq; static unsigned int hi_freq; static unsigned int cur_freq; static unsigned int sleep_freq; /* * Different models uses different mechanisms to switch the frequency */ static int (*set_speed_proc)(int low_speed); static unsigned int (*get_speed_proc)(void); /* * Some definitions used by the various speedprocs */ static u32 voltage_gpio; static u32 frequency_gpio; static u32 slew_done_gpio; static int no_schedule; static int has_cpu_l2lve; static int is_pmu_based; /* There are only two frequency states for each processor. Values * are in kHz for the time being. */ #define CPUFREQ_HIGH 0 #define CPUFREQ_LOW 1 static struct cpufreq_frequency_table pmac_cpu_freqs[] = { {CPUFREQ_HIGH, 0}, {CPUFREQ_LOW, 0}, {0, CPUFREQ_TABLE_END}, }; static struct freq_attr* pmac_cpu_freqs_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static inline void local_delay(unsigned long ms) { if (no_schedule) mdelay(ms); else msleep(ms); } #ifdef DEBUG_FREQ static inline void debug_calc_bogomips(void) { /* This will cause a recalc of bogomips and display the * result. We backup/restore the value to avoid affecting the * core cpufreq framework's own calculation. 
*/ unsigned long save_lpj = loops_per_jiffy; calibrate_delay(); loops_per_jiffy = save_lpj; } #endif /* DEBUG_FREQ */ /* Switch CPU speed under 750FX CPU control */ static int cpu_750fx_cpu_speed(int low_speed) { u32 hid2; if (low_speed == 0) { /* ramping up, set voltage first */ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); /* Make sure we sleep for at least 1ms */ local_delay(10); /* tweak L2 for high voltage */ if (has_cpu_l2lve) { hid2 = mfspr(SPRN_HID2); hid2 &= ~0x2000; mtspr(SPRN_HID2, hid2); } } #ifdef CONFIG_6xx low_choose_750fx_pll(low_speed); #endif if (low_speed == 1) { /* tweak L2 for low voltage */ if (has_cpu_l2lve) { hid2 = mfspr(SPRN_HID2); hid2 |= 0x2000; mtspr(SPRN_HID2, hid2); } /* ramping down, set voltage last */ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); local_delay(10); } return 0; } static unsigned int cpu_750fx_get_cpu_speed(void) { if (mfspr(SPRN_HID1) & HID1_PS) return low_freq; else return hi_freq; } /* Switch CPU speed using DFS */ static int dfs_set_cpu_speed(int low_speed) { if (low_speed == 0) { /* ramping up, set voltage first */ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); /* Make sure we sleep for at least 1ms */ local_delay(1); } /* set frequency */ #ifdef CONFIG_6xx low_choose_7447a_dfs(low_speed); #endif udelay(100); if (low_speed == 1) { /* ramping down, set voltage last */ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); local_delay(1); } return 0; } static unsigned int dfs_get_cpu_speed(void) { if (mfspr(SPRN_HID1) & HID1_DFS) return low_freq; else return hi_freq; } /* Switch CPU speed using slewing GPIOs */ static int gpios_set_cpu_speed(int low_speed) { int gpio, timeout = 0; /* If ramping up, set voltage first */ if (low_speed == 0) { pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); /* Delay is way too big but it's ok, we schedule */ local_delay(10); } /* Set frequency */ gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, 
frequency_gpio, 0); if (low_speed == ((gpio & 0x01) == 0)) goto skip; pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio, low_speed ? 0x04 : 0x05); udelay(200); do { if (++timeout > 100) break; local_delay(1); gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0); } while((gpio & 0x02) == 0); skip: /* If ramping down, set voltage last */ if (low_speed == 1) { pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); /* Delay is way too big but it's ok, we schedule */ local_delay(10); } #ifdef DEBUG_FREQ debug_calc_bogomips(); #endif return 0; } /* Switch CPU speed under PMU control */ static int pmu_set_cpu_speed(int low_speed) { struct adb_request req; unsigned long save_l2cr; unsigned long save_l3cr; unsigned int pic_prio; unsigned long flags; preempt_disable(); #ifdef DEBUG_FREQ printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1)); #endif pmu_suspend(); /* Disable all interrupt sources on openpic */ pic_prio = mpic_cpu_get_priority(); mpic_cpu_set_priority(0xf); /* Make sure the decrementer won't interrupt us */ asm volatile("mtdec %0" : : "r" (0x7fffffff)); /* Make sure any pending DEC interrupt occurring while we did * the above didn't re-enable the DEC */ mb(); asm volatile("mtdec %0" : : "r" (0x7fffffff)); /* We can now disable MSR_EE */ local_irq_save(flags); /* Giveup the FPU & vec */ enable_kernel_fp(); #ifdef CONFIG_ALTIVEC if (cpu_has_feature(CPU_FTR_ALTIVEC)) enable_kernel_altivec(); #endif /* CONFIG_ALTIVEC */ /* Save & disable L2 and L3 caches */ save_l3cr = _get_L3CR(); /* (returns -1 if not available) */ save_l2cr = _get_L2CR(); /* (returns -1 if not available) */ /* Send the new speed command. 
My assumption is that this command * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep */ pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed); while (!req.complete) pmu_poll(); /* Prepare the northbridge for the speed transition */ pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1); /* Call low level code to backup CPU state and recover from * hardware reset */ low_sleep_handler(); /* Restore the northbridge */ pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0); /* Restore L2 cache */ if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) _set_L2CR(save_l2cr); /* Restore L3 cache */ if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0) _set_L3CR(save_l3cr); /* Restore userland MMU context */ switch_mmu_context(NULL, current->active_mm); #ifdef DEBUG_FREQ printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1)); #endif /* Restore low level PMU operations */ pmu_unlock(); /* * Restore decrementer; we'll take a decrementer interrupt * as soon as interrupts are re-enabled and the generic * clockevents code will reprogram it with the right value. */ set_dec(1); /* Restore interrupts */ mpic_cpu_set_priority(pic_prio); /* Let interrupts flow again ... */ local_irq_restore(flags); #ifdef DEBUG_FREQ debug_calc_bogomips(); #endif pmu_resume(); preempt_enable(); return 0; } static int do_set_cpu_speed(int speed_mode, int notify) { struct cpufreq_freqs freqs; unsigned long l3cr; static unsigned long prev_l3cr; freqs.old = cur_freq; freqs.new = (speed_mode == CPUFREQ_HIGH) ? 
hi_freq : low_freq; freqs.cpu = smp_processor_id(); if (freqs.old == freqs.new) return 0; if (notify) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); if (speed_mode == CPUFREQ_LOW && cpu_has_feature(CPU_FTR_L3CR)) { l3cr = _get_L3CR(); if (l3cr & L3CR_L3E) { prev_l3cr = l3cr; _set_L3CR(0); } } set_speed_proc(speed_mode == CPUFREQ_LOW); if (speed_mode == CPUFREQ_HIGH && cpu_has_feature(CPU_FTR_L3CR)) { l3cr = _get_L3CR(); if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr) _set_L3CR(prev_l3cr); } if (notify) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; return 0; } static unsigned int pmac_cpufreq_get_speed(unsigned int cpu) { return cur_freq; } static int pmac_cpufreq_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); } static int pmac_cpufreq_target( struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0; int rc; if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs, target_freq, relation, &newstate)) return -EINVAL; rc = do_set_cpu_speed(newstate, 1); ppc_proc_freq = cur_freq * 1000ul; return rc; } static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) { if (policy->cpu != 0) return -ENODEV; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; policy->cur = cur_freq; cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu); return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); } static u32 read_gpio(struct device_node *np) { const u32 *reg = of_get_property(np, "reg", NULL); u32 offset; if (reg == NULL) return 0; /* That works for all keylargos but shall be fixed properly * some day... The problem is that it seems we can't rely * on the "reg" property of the GPIO nodes, they are either * relative to the base of KeyLargo or to the base of the * GPIO space, and the device-tree doesn't help. 
*/ offset = *reg; if (offset < KEYLARGO_GPIO_LEVELS0) offset += KEYLARGO_GPIO_LEVELS0; return offset; } static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) { /* Ok, this could be made a bit smarter, but let's be robust for now. We * always force a speed change to high speed before sleep, to make sure * we have appropriate voltage and/or bus speed for the wakeup process, * and to make sure our loops_per_jiffies are "good enough", that is will * not cause too short delays if we sleep in low speed and wake in high * speed.. */ no_schedule = 1; sleep_freq = cur_freq; if (cur_freq == low_freq && !is_pmu_based) do_set_cpu_speed(CPUFREQ_HIGH, 0); return 0; } static int pmac_cpufreq_resume(struct cpufreq_policy *policy) { /* If we resume, first check if we have a get() function */ if (get_speed_proc) cur_freq = get_speed_proc(); else cur_freq = 0; /* We don't, hrm... we don't really know our speed here, best * is that we force a switch to whatever it was, which is * probably high speed due to our suspend() routine */ do_set_cpu_speed(sleep_freq == low_freq ? CPUFREQ_LOW : CPUFREQ_HIGH, 0); ppc_proc_freq = cur_freq * 1000ul; no_schedule = 0; return 0; } static struct cpufreq_driver pmac_cpufreq_driver = { .verify = pmac_cpufreq_verify, .target = pmac_cpufreq_target, .get = pmac_cpufreq_get_speed, .init = pmac_cpufreq_cpu_init, .suspend = pmac_cpufreq_suspend, .resume = pmac_cpufreq_resume, .flags = CPUFREQ_PM_NO_WARN, .attr = pmac_cpu_freqs_attr, .name = "powermac", .owner = THIS_MODULE, }; static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) { struct device_node *volt_gpio_np = of_find_node_by_name(NULL, "voltage-gpio"); struct device_node *freq_gpio_np = of_find_node_by_name(NULL, "frequency-gpio"); struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL, "slewing-done"); const u32 *value; /* * Check to see if it's GPIO driven or PMU only * * The way we extract the GPIO address is slightly hackish, but it * works well enough for now. 
We need to abstract the whole GPIO * stuff sooner or later anyway */ if (volt_gpio_np) voltage_gpio = read_gpio(volt_gpio_np); if (freq_gpio_np) frequency_gpio = read_gpio(freq_gpio_np); if (slew_done_gpio_np) slew_done_gpio = read_gpio(slew_done_gpio_np); /* If we use the frequency GPIOs, calculate the min/max speeds based * on the bus frequencies */ if (frequency_gpio && slew_done_gpio) { int lenp, rc; const u32 *freqs, *ratio; freqs = of_get_property(cpunode, "bus-frequencies", &lenp); lenp /= sizeof(u32); if (freqs == NULL || lenp != 2) { printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n"); return 1; } ratio = of_get_property(cpunode, "processor-to-bus-ratio*2", NULL); if (ratio == NULL) { printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n"); return 1; } /* Get the min/max bus frequencies */ low_freq = min(freqs[0], freqs[1]); hi_freq = max(freqs[0], freqs[1]); /* Grrrr.. It _seems_ that the device-tree is lying on the low bus * frequency, it claims it to be around 84Mhz on some models while * it appears to be approx. 101Mhz on all. Let's hack around here... * fortunately, we don't need to be too precise */ if (low_freq < 98000000) low_freq = 101000000; /* Convert those to CPU core clocks */ low_freq = (low_freq * (*ratio)) / 2000; hi_freq = (hi_freq * (*ratio)) / 2000; /* Now we get the frequencies, we read the GPIO to see what is out current * speed */ rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0); cur_freq = (rc & 0x01) ? 
hi_freq : low_freq; set_speed_proc = gpios_set_cpu_speed; return 1; } /* If we use the PMU, look for the min & max frequencies in the * device-tree */ value = of_get_property(cpunode, "min-clock-frequency", NULL); if (!value) return 1; low_freq = (*value) / 1000; /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree * here */ if (low_freq < 100000) low_freq *= 10; value = of_get_property(cpunode, "max-clock-frequency", NULL); if (!value) return 1; hi_freq = (*value) / 1000; set_speed_proc = pmu_set_cpu_speed; is_pmu_based = 1; return 0; } static int pmac_cpufreq_init_7447A(struct device_node *cpunode) { struct device_node *volt_gpio_np; if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL) return 1; volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); if (volt_gpio_np) voltage_gpio = read_gpio(volt_gpio_np); if (!voltage_gpio){ printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n"); return 1; } /* OF only reports the high frequency */ hi_freq = cur_freq; low_freq = cur_freq/2; /* Read actual frequency from CPU */ cur_freq = dfs_get_cpu_speed(); set_speed_proc = dfs_set_cpu_speed; get_speed_proc = dfs_get_cpu_speed; return 0; } static int pmac_cpufreq_init_750FX(struct device_node *cpunode) { struct device_node *volt_gpio_np; u32 pvr; const u32 *value; if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL) return 1; hi_freq = cur_freq; value = of_get_property(cpunode, "reduced-clock-frequency", NULL); if (!value) return 1; low_freq = (*value) / 1000; volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); if (volt_gpio_np) voltage_gpio = read_gpio(volt_gpio_np); pvr = mfspr(SPRN_PVR); has_cpu_l2lve = !((pvr & 0xf00) == 0x100); set_speed_proc = cpu_750fx_cpu_speed; get_speed_proc = cpu_750fx_get_cpu_speed; cur_freq = cpu_750fx_get_cpu_speed(); return 0; } /* Currently, we support the following machines: * * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz) * - Titanium PowerBook 800 (PMU based, 667Mhz & 
800Mhz) * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz) * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz) * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz) * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage) * - Recent MacRISC3 laptops * - All new machines with 7447A CPUs */ static int __init pmac_cpufreq_setup(void) { struct device_node *cpunode; const u32 *value; if (strstr(cmd_line, "nocpufreq")) return 0; /* Assume only one CPU */ cpunode = of_find_node_by_type(NULL, "cpu"); if (!cpunode) goto out; /* Get current cpu clock freq */ value = of_get_property(cpunode, "clock-frequency", NULL); if (!value) goto out; cur_freq = (*value) / 1000; /* Check for 7447A based MacRISC3 */ if (of_machine_is_compatible("MacRISC3") && of_get_property(cpunode, "dynamic-power-step", NULL) && PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { pmac_cpufreq_init_7447A(cpunode); /* Check for other MacRISC3 machines */ } else if (of_machine_is_compatible("PowerBook3,4") || of_machine_is_compatible("PowerBook3,5") || of_machine_is_compatible("MacRISC3")) { pmac_cpufreq_init_MacRISC3(cpunode); /* Else check for iBook2 500/600 */ } else if (of_machine_is_compatible("PowerBook4,1")) { hi_freq = cur_freq; low_freq = 400000; set_speed_proc = pmu_set_cpu_speed; is_pmu_based = 1; } /* Else check for TiPb 550 */ else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) { hi_freq = cur_freq; low_freq = 500000; set_speed_proc = pmu_set_cpu_speed; is_pmu_based = 1; } /* Else check for TiPb 400 & 500 */ else if (of_machine_is_compatible("PowerBook3,2")) { /* We only know about the 400 MHz and the 500Mhz model * they both have 300 MHz as low frequency */ if (cur_freq < 350000 || cur_freq > 550000) goto out; hi_freq = cur_freq; low_freq = 300000; set_speed_proc = pmu_set_cpu_speed; is_pmu_based = 1; } /* Else check for 750FX */ else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000) pmac_cpufreq_init_750FX(cpunode); out: of_node_put(cpunode); if (set_speed_proc == NULL) return 
-ENODEV; pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq; pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq; ppc_proc_freq = cur_freq * 1000ul; printk(KERN_INFO "Registering PowerMac CPU frequency driver\n"); printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n", low_freq/1000, hi_freq/1000, cur_freq/1000); return cpufreq_register_driver(&pmac_cpufreq_driver); } module_init(pmac_cpufreq_setup);
gpl-2.0
m2mselect/owrt
DLpatch/linux-3.18.29/arch/x86/platform/olpc/olpc.c
4410
10143
/* * Support for the OLPC DCON and OLPC EC access * * Copyright © 2006 Advanced Micro Devices, Inc. * Copyright © 2007-2008 Andres Salomon <dilinger@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/syscore_ops.h> #include <linux/mutex.h> #include <linux/olpc-ec.h> #include <asm/geode.h> #include <asm/setup.h> #include <asm/olpc.h> #include <asm/olpc_ofw.h> struct olpc_platform_t olpc_platform_info; EXPORT_SYMBOL_GPL(olpc_platform_info); /* EC event mask to be applied during suspend (defining wakeup sources). */ static u16 ec_wakeup_mask; /* what the timeout *should* be (in ms) */ #define EC_BASE_TIMEOUT 20 /* the timeout that bugs in the EC might force us to actually use */ static int ec_timeout = EC_BASE_TIMEOUT; static int __init olpc_ec_timeout_set(char *str) { if (get_option(&str, &ec_timeout) != 1) { ec_timeout = EC_BASE_TIMEOUT; printk(KERN_ERR "olpc-ec: invalid argument to " "'olpc_ec_timeout=', ignoring!\n"); } printk(KERN_DEBUG "olpc-ec: using %d ms delay for EC commands.\n", ec_timeout); return 1; } __setup("olpc_ec_timeout=", olpc_ec_timeout_set); /* * These {i,o}bf_status functions return whether the buffers are full or not. 
*/ static inline unsigned int ibf_status(unsigned int port) { return !!(inb(port) & 0x02); } static inline unsigned int obf_status(unsigned int port) { return inb(port) & 0x01; } #define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d)) static int __wait_on_ibf(unsigned int line, unsigned int port, int desired) { unsigned int timeo; int state = ibf_status(port); for (timeo = ec_timeout; state != desired && timeo; timeo--) { mdelay(1); state = ibf_status(port); } if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) && timeo < (ec_timeout - EC_BASE_TIMEOUT)) { printk(KERN_WARNING "olpc-ec: %d: waited %u ms for IBF!\n", line, ec_timeout - timeo); } return !(state == desired); } #define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d)) static int __wait_on_obf(unsigned int line, unsigned int port, int desired) { unsigned int timeo; int state = obf_status(port); for (timeo = ec_timeout; state != desired && timeo; timeo--) { mdelay(1); state = obf_status(port); } if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) && timeo < (ec_timeout - EC_BASE_TIMEOUT)) { printk(KERN_WARNING "olpc-ec: %d: waited %u ms for OBF!\n", line, ec_timeout - timeo); } return !(state == desired); } /* * This allows the kernel to run Embedded Controller commands. The EC is * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the * available EC commands are here: * <http://wiki.laptop.org/go/Ec_specification>. Unfortunately, while * OpenFirmware's source is available, the EC's is not. 
*/ static int olpc_xo1_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, void *arg) { int ret = -EIO; int i; int restarts = 0; /* Clear OBF */ for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++) inb(0x68); if (i == 10) { printk(KERN_ERR "olpc-ec: timeout while attempting to " "clear OBF flag!\n"); goto err; } if (wait_on_ibf(0x6c, 0)) { printk(KERN_ERR "olpc-ec: timeout waiting for EC to " "quiesce!\n"); goto err; } restart: /* * Note that if we time out during any IBF checks, that's a failure; * we have to return. There's no way for the kernel to clear that. * * If we time out during an OBF check, we can restart the command; * reissuing it will clear the OBF flag, and we should be alright. * The OBF flag will sometimes misbehave due to what we believe * is a hardware quirk.. */ pr_devel("olpc-ec: running cmd 0x%x\n", cmd); outb(cmd, 0x6c); if (wait_on_ibf(0x6c, 0)) { printk(KERN_ERR "olpc-ec: timeout waiting for EC to read " "command!\n"); goto err; } if (inbuf && inlen) { /* write data to EC */ for (i = 0; i < inlen; i++) { pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]); outb(inbuf[i], 0x68); if (wait_on_ibf(0x6c, 0)) { printk(KERN_ERR "olpc-ec: timeout waiting for" " EC accept data!\n"); goto err; } } } if (outbuf && outlen) { /* read data from EC */ for (i = 0; i < outlen; i++) { if (wait_on_obf(0x6c, 1)) { printk(KERN_ERR "olpc-ec: timeout waiting for" " EC to provide data!\n"); if (restarts++ < 10) goto restart; goto err; } outbuf[i] = inb(0x68); pr_devel("olpc-ec: received 0x%x\n", outbuf[i]); } } ret = 0; err: return ret; } void olpc_ec_wakeup_set(u16 value) { ec_wakeup_mask |= value; } EXPORT_SYMBOL_GPL(olpc_ec_wakeup_set); void olpc_ec_wakeup_clear(u16 value) { ec_wakeup_mask &= ~value; } EXPORT_SYMBOL_GPL(olpc_ec_wakeup_clear); /* * Returns true if the compile and runtime configurations allow for EC events * to wake the system. 
*/ bool olpc_ec_wakeup_available(void) { if (!machine_is_olpc()) return false; /* * XO-1 EC wakeups are available when olpc-xo1-sci driver is * compiled in */ #ifdef CONFIG_OLPC_XO1_SCI if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) /* XO-1 */ return true; #endif /* * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is * compiled in */ #ifdef CONFIG_OLPC_XO15_SCI if (olpc_platform_info.boardrev >= olpc_board_pre(0xd0)) /* XO-1.5 */ return true; #endif return false; } EXPORT_SYMBOL_GPL(olpc_ec_wakeup_available); int olpc_ec_mask_write(u16 bits) { if (olpc_platform_info.flags & OLPC_F_EC_WIDE_SCI) { __be16 ec_word = cpu_to_be16(bits); return olpc_ec_cmd(EC_WRITE_EXT_SCI_MASK, (void *) &ec_word, 2, NULL, 0); } else { unsigned char ec_byte = bits & 0xff; return olpc_ec_cmd(EC_WRITE_SCI_MASK, &ec_byte, 1, NULL, 0); } } EXPORT_SYMBOL_GPL(olpc_ec_mask_write); int olpc_ec_sci_query(u16 *sci_value) { int ret; if (olpc_platform_info.flags & OLPC_F_EC_WIDE_SCI) { __be16 ec_word; ret = olpc_ec_cmd(EC_EXT_SCI_QUERY, NULL, 0, (void *) &ec_word, 2); if (ret == 0) *sci_value = be16_to_cpu(ec_word); } else { unsigned char ec_byte; ret = olpc_ec_cmd(EC_SCI_QUERY, NULL, 0, &ec_byte, 1); if (ret == 0) *sci_value = ec_byte; } return ret; } EXPORT_SYMBOL_GPL(olpc_ec_sci_query); static bool __init check_ofw_architecture(struct device_node *root) { const char *olpc_arch; int propsize; olpc_arch = of_get_property(root, "architecture", &propsize); return propsize == 5 && strncmp("OLPC", olpc_arch, 5) == 0; } static u32 __init get_board_revision(struct device_node *root) { int propsize; const __be32 *rev; rev = of_get_property(root, "board-revision-int", &propsize); if (propsize != 4) return 0; return be32_to_cpu(*rev); } static bool __init platform_detect(void) { struct device_node *root = of_find_node_by_path("/"); bool success; if (!root) return false; success = check_ofw_architecture(root); if (success) { olpc_platform_info.boardrev = get_board_revision(root); 
olpc_platform_info.flags |= OLPC_F_PRESENT; } of_node_put(root); return success; } static int __init add_xo1_platform_devices(void) { struct platform_device *pdev; pdev = platform_device_register_simple("xo1-rfkill", -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } static int olpc_xo1_ec_probe(struct platform_device *pdev) { /* get the EC revision */ olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, (unsigned char *) &olpc_platform_info.ecver, 1); /* EC version 0x5f adds support for wide SCI mask */ if (olpc_platform_info.ecver >= 0x5f) olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI; pr_info("OLPC board revision %s%X (EC=%x)\n", ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", olpc_platform_info.boardrev >> 4, olpc_platform_info.ecver); return 0; } static int olpc_xo1_ec_suspend(struct platform_device *pdev) { olpc_ec_mask_write(ec_wakeup_mask); /* * Squelch SCIs while suspended. This is a fix for * <http://dev.laptop.org/ticket/1835>. */ return olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0); } static int olpc_xo1_ec_resume(struct platform_device *pdev) { /* Tell the EC to stop inhibiting SCIs */ olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0); /* * Tell the wireless module to restart USB communication. * Must be done twice. 
*/ olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0); return 0; } static struct olpc_ec_driver ec_xo1_driver = { .probe = olpc_xo1_ec_probe, .suspend = olpc_xo1_ec_suspend, .resume = olpc_xo1_ec_resume, .ec_cmd = olpc_xo1_ec_cmd, }; static struct olpc_ec_driver ec_xo1_5_driver = { .probe = olpc_xo1_ec_probe, .ec_cmd = olpc_xo1_ec_cmd, }; static int __init olpc_init(void) { int r = 0; if (!olpc_ofw_present() || !platform_detect()) return 0; /* register the XO-1 and 1.5-specific EC handler */ if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) /* XO-1 */ olpc_ec_driver_register(&ec_xo1_driver, NULL); else olpc_ec_driver_register(&ec_xo1_5_driver, NULL); platform_device_register_simple("olpc-ec", -1, NULL, 0); /* assume B1 and above models always have a DCON */ if (olpc_board_at_least(olpc_board(0xb1))) olpc_platform_info.flags |= OLPC_F_DCON; #ifdef CONFIG_PCI_OLPC /* If the VSA exists let it emulate PCI, if not emulate in kernel. * XO-1 only. */ if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) && !cs5535_has_vsa2()) x86_init.pci.arch_init = pci_olpc_init; #endif if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */ r = add_xo1_platform_devices(); if (r) return r; } return 0; } postcore_initcall(olpc_init);
gpl-2.0
utilite2/linux-kernel
fs/9p/vfs_dentry.c
4922
3560
/* * linux/fs/9p/vfs_dentry.c * * This file contians vfs dentry ops for the 9P2000 protocol. * * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/inet.h> #include <linux/namei.h> #include <linux/idr.h> #include <linux/sched.h> #include <linux/slab.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" /** * v9fs_dentry_delete - called when dentry refcount equals 0 * @dentry: dentry in question * * By returning 1 here we should remove cacheing of unused * dentry components. 
* */ static int v9fs_dentry_delete(const struct dentry *dentry) { p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, dentry); return 1; } /** * v9fs_cached_dentry_delete - called when dentry refcount equals 0 * @dentry: dentry in question * */ static int v9fs_cached_dentry_delete(const struct dentry *dentry) { p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, dentry); /* Don't cache negative dentries */ if (!dentry->d_inode) return 1; return 0; } /** * v9fs_dentry_release - called when dentry is going to be freed * @dentry: dentry that is being release * */ static void v9fs_dentry_release(struct dentry *dentry) { struct v9fs_dentry *dent; struct p9_fid *temp, *current_fid; p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, dentry); dent = dentry->d_fsdata; if (dent) { list_for_each_entry_safe(current_fid, temp, &dent->fidlist, dlist) { p9_client_clunk(current_fid); } kfree(dent); dentry->d_fsdata = NULL; } } static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) { struct p9_fid *fid; struct inode *inode; struct v9fs_inode *v9inode; if (nd->flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; if (!inode) goto out_valid; v9inode = V9FS_I(inode); if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) { int retval; struct v9fs_session_info *v9ses; fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); v9ses = v9fs_inode2v9ses(inode); if (v9fs_proto_dotl(v9ses)) retval = v9fs_refresh_inode_dotl(fid, inode); else retval = v9fs_refresh_inode(fid, inode); if (retval == -ENOENT) return 0; if (retval < 0) return retval; } out_valid: return 1; } const struct dentry_operations v9fs_cached_dentry_operations = { .d_revalidate = v9fs_lookup_revalidate, .d_delete = v9fs_cached_dentry_delete, .d_release = v9fs_dentry_release, }; const struct dentry_operations v9fs_dentry_operations = { .d_delete = v9fs_dentry_delete, .d_release = v9fs_dentry_release, };
gpl-2.0
DirtyUnicorns/android_kernel_samsung_manta
net/nfc/nci/rsp.c
4922
6186
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 *  This file is based on hci_event.c, which was written
 *  by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>

/* Handle NCI Response packets */

/* CORE_RESET_RSP: record the NCI version on success, complete the request */
static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	struct nci_core_reset_rsp *rsp = (void *) skb->data;

	pr_debug("status 0x%x\n", rsp->status);

	if (rsp->status == NCI_STATUS_OK) {
		ndev->nci_ver = rsp->nci_ver;
		pr_debug("nci_ver 0x%x, config_status 0x%x\n",
			 rsp->nci_ver, rsp->config_status);
	}

	nci_req_complete(ndev, rsp->status);
}

/*
 * CORE_INIT_RSP: parse the two-part init response.  Part 1 carries the
 * NFCC feature mask and the supported RF interface list; part 2 follows
 * it in the same payload and carries the controller limits.
 */
static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
	struct nci_core_init_rsp_2 *rsp_2;

	pr_debug("status 0x%x\n", rsp_1->status);

	if (rsp_1->status != NCI_STATUS_OK)
		goto exit;

	ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
	ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;

	/* clamp to the driver's table size before the memcpy below */
	if (ndev->num_supported_rf_interfaces >
	    NCI_MAX_SUPPORTED_RF_INTERFACES) {
		ndev->num_supported_rf_interfaces =
			NCI_MAX_SUPPORTED_RF_INTERFACES;
	}

	memcpy(ndev->supported_rf_interfaces,
	       rsp_1->supported_rf_interfaces,
	       ndev->num_supported_rf_interfaces);

	/*
	 * Part 2 starts right after part 1: 6 fixed bytes plus the
	 * variable-length interface list.  Note the offset uses the
	 * un-clamped count from the wire, matching the actual layout.
	 */
	rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);

	ndev->max_logical_connections = rsp_2->max_logical_connections;
	ndev->max_routing_table_size =
		__le16_to_cpu(rsp_2->max_routing_table_size);
	ndev->max_ctrl_pkt_payload_len = rsp_2->max_ctrl_pkt_payload_len;
	ndev->max_size_for_large_params =
		__le16_to_cpu(rsp_2->max_size_for_large_params);
	ndev->manufact_id = rsp_2->manufact_id;
	ndev->manufact_specific_info =
		__le32_to_cpu(rsp_2->manufact_specific_info);

	pr_debug("nfcc_features 0x%x\n", ndev->nfcc_features);
	pr_debug("num_supported_rf_interfaces %d\n",
		 ndev->num_supported_rf_interfaces);
	pr_debug("supported_rf_interfaces[0] 0x%x\n",
		 ndev->supported_rf_interfaces[0]);
	pr_debug("supported_rf_interfaces[1] 0x%x\n",
		 ndev->supported_rf_interfaces[1]);
	pr_debug("supported_rf_interfaces[2] 0x%x\n",
		 ndev->supported_rf_interfaces[2]);
	pr_debug("supported_rf_interfaces[3] 0x%x\n",
		 ndev->supported_rf_interfaces[3]);
	pr_debug("max_logical_connections %d\n",
		 ndev->max_logical_connections);
	pr_debug("max_routing_table_size %d\n",
		 ndev->max_routing_table_size);
	pr_debug("max_ctrl_pkt_payload_len %d\n",
		 ndev->max_ctrl_pkt_payload_len);
	pr_debug("max_size_for_large_params %d\n",
		 ndev->max_size_for_large_params);
	pr_debug("manufact_id 0x%x\n",
		 ndev->manufact_id);
	pr_debug("manufact_specific_info 0x%x\n",
		 ndev->manufact_specific_info);

exit:
	nci_req_complete(ndev, rsp_1->status);
}

/* RF_DISCOVER_MAP_RSP: status byte only */
static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
				       struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	nci_req_complete(ndev, status);
}

/* RF_DISCOVER_RSP: on success the device enters the DISCOVERY state */
static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	if (status == NCI_STATUS_OK)
		atomic_set(&ndev->state, NCI_DISCOVERY);

	nci_req_complete(ndev, status);
}

static void nci_rf_disc_select_rsp_packet(struct nci_dev *ndev,
					  struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	/* Complete the request on intf_activated_ntf or generic_error_ntf */
	if (status != NCI_STATUS_OK)
		nci_req_complete(ndev, status);
}

static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
					 struct sk_buff *skb)
{
	__u8 status = skb->data[0];

	pr_debug("status 0x%x\n", status);

	/* If target was active, complete the request only in deactivate_ntf */
	if ((status != NCI_STATUS_OK) ||
	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
		nci_req_complete(ndev, status);
	}
}

/*
 * Top-level response dispatcher: stops the command timer, strips the
 * control header, routes the payload by opcode, frees the skb, and
 * kicks the command queue so the next pending command is sent.
 */
void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	__u16 rsp_opcode = nci_opcode(skb->data);

	/* we got a rsp, stop the cmd timer */
	del_timer(&ndev->cmd_timer);

	pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
		 nci_pbf(skb->data),
		 nci_opcode_gid(rsp_opcode),
		 nci_opcode_oid(rsp_opcode),
		 nci_plen(skb->data));

	/* strip the nci control header */
	skb_pull(skb, NCI_CTRL_HDR_SIZE);

	switch (rsp_opcode) {
	case NCI_OP_CORE_RESET_RSP:
		nci_core_reset_rsp_packet(ndev, skb);
		break;

	case NCI_OP_CORE_INIT_RSP:
		nci_core_init_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_MAP_RSP:
		nci_rf_disc_map_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_RSP:
		nci_rf_disc_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DISCOVER_SELECT_RSP:
		nci_rf_disc_select_rsp_packet(ndev, skb);
		break;

	case NCI_OP_RF_DEACTIVATE_RSP:
		nci_rf_deactivate_rsp_packet(ndev, skb);
		break;

	default:
		pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
		break;
	}

	kfree_skb(skb);

	/* trigger the next cmd */
	atomic_set(&ndev->cmd_cnt, 1);
	if (!skb_queue_empty(&ndev->cmd_q))
		queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
gpl-2.0
JonnyXDA/android_kernel_ulefone_metal
arch/powerpc/platforms/cell/spu_priv1_mmio.c
9530
4745
/*
 * spu hypervisor abstraction for direct hardware access.
 *
 *  (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *  Copyright 2006 Sony Corp.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/ptrace.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/sched.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/prom.h>

#include "interrupt.h"
#include "spu_priv1_mmio.h"

/*
 * Each helper below is a thin big-endian MMIO accessor for one
 * privileged-1 SPU register; together they back spu_priv1_mmio_ops,
 * the "bare metal" (non-hypervisor) implementation of spu_priv1_ops.
 */

/* clear bits in the per-class interrupt mask (read-modify-write) */
static void int_mask_and(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
	out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
}

/* set bits in the per-class interrupt mask (read-modify-write) */
static void int_mask_or(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
	out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
}

static void int_mask_set(struct spu *spu, int class, u64 mask)
{
	out_be64(&spu->priv1->int_mask_RW[class], mask);
}

static u64 int_mask_get(struct spu *spu, int class)
{
	return in_be64(&spu->priv1->int_mask_RW[class]);
}

/* interrupt status is write-1-to-clear */
static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
	out_be64(&spu->priv1->int_stat_RW[class], stat);
}

static u64 int_stat_get(struct spu *spu, int class)
{
	return in_be64(&spu->priv1->int_stat_RW[class]);
}

/*
 * Route this SPU's interrupts to @cpu.  If the SPU's node has CPUs,
 * only allow targets in the same NUMA node; the IIC target id is
 * replicated into the three 16-bit route fields (one per class).
 */
static void cpu_affinity_set(struct spu *spu, int cpu)
{
	u64 target;
	u64 route;

	if (nr_cpus_node(spu->node)) {
		const struct cpumask *spumask = cpumask_of_node(spu->node),
			*cpumask = cpumask_of_node(cpu_to_node(cpu));

		if (!cpumask_intersects(spumask, cpumask))
			return;
	}

	target = iic_get_target_id(cpu);
	route = target << 48 | target << 32 | target << 16;
	out_be64(&spu->priv1->int_route_RW, route);
}

static u64 mfc_dar_get(struct spu *spu)
{
	return in_be64(&spu->priv1->mfc_dar_RW);
}

static u64 mfc_dsisr_get(struct spu *spu)
{
	return in_be64(&spu->priv1->mfc_dsisr_RW);
}

static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
	out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
}

/* mirror the CPU's page-table base (SDR1) into the MFC */
static void mfc_sdr_setup(struct spu *spu)
{
	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
}

static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
	out_be64(&spu->priv1->mfc_sr1_RW, sr1);
}

static u64 mfc_sr1_get(struct spu *spu)
{
	return in_be64(&spu->priv1->mfc_sr1_RW);
}

static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
	out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
}

static u64 mfc_tclass_id_get(struct spu *spu)
{
	return in_be64(&spu->priv1->mfc_tclass_id_RW);
}

/* any write to this register invalidates the SPU TLB */
static void tlb_invalidate(struct spu *spu)
{
	out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
}

static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
	out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
}

static u64 resource_allocation_groupID_get(struct spu *spu)
{
	return in_be64(&spu->priv1->resource_allocation_groupID_RW);
}

static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
	out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
}

static u64 resource_allocation_enable_get(struct spu *spu)
{
	return in_be64(&spu->priv1->resource_allocation_enable_RW);
}

/* direct-MMIO implementation of the spu_priv1_ops interface */
const struct spu_priv1_ops spu_priv1_mmio_ops =
{
	.int_mask_and = int_mask_and,
	.int_mask_or = int_mask_or,
	.int_mask_set = int_mask_set,
	.int_mask_get = int_mask_get,
	.int_stat_clear = int_stat_clear,
	.int_stat_get = int_stat_get,
	.cpu_affinity_set = cpu_affinity_set,
	.mfc_dar_get = mfc_dar_get,
	.mfc_dsisr_get = mfc_dsisr_get,
	.mfc_dsisr_set = mfc_dsisr_set,
	.mfc_sdr_setup = mfc_sdr_setup,
	.mfc_sr1_set = mfc_sr1_set,
	.mfc_sr1_get = mfc_sr1_get,
	.mfc_tclass_id_set = mfc_tclass_id_set,
	.mfc_tclass_id_get = mfc_tclass_id_get,
	.tlb_invalidate = tlb_invalidate,
	.resource_allocation_groupID_set = resource_allocation_groupID_set,
	.resource_allocation_groupID_get = resource_allocation_groupID_get,
	.resource_allocation_enable_set = resource_allocation_enable_set,
	.resource_allocation_enable_get = resource_allocation_enable_get,
};
gpl-2.0
Voyager1/xbmc
xbmc/addons/GUIViewStateAddonBrowser.cpp
59
2499
/*
 *      Copyright (C) 2005-2013 Team XBMC
 *      http://xbmc.org
 *
 *  This Program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This Program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with XBMC; see the file COPYING.  If not, see
 *  <http://www.gnu.org/licenses/>.
 *
 */

#include "GUIViewStateAddonBrowser.h"
#include "addons/Addon.h"
#include "addons/AddonManager.h"
#include "FileItem.h"
#include "filesystem/File.h"
#include "guilib/GraphicContext.h"
#include "guilib/WindowIDs.h"
#include "view/ViewState.h"
#include "utils/URIUtils.h"
#include "utils/StringUtils.h"

using namespace XFILE;
using namespace ADDON;

// Configures sort methods and the default view for the add-on browser
// window, depending on which addons:// path is being listed.  The numeric
// arguments to AddSortMethod are localized label ids; the LABEL_MASKS
// pairs select the text shown per item/folder in list and icon layouts.
CGUIViewStateAddonBrowser::CGUIViewStateAddonBrowser(const CFileItemList& items) : CGUIViewState(items)
{
  if (URIUtils::PathEquals(items.GetPath(), "addons://"))
  {
    // Root listing: keep the categories in their natural order.
    AddSortMethod(SortByNone, 551, LABEL_MASKS("%F", "", "%L", ""));
    SetSortMethod(SortByNone);
  }
  else if (URIUtils::PathEquals(items.GetPath(), "addons://recently_updated/", true))
  {
    AddSortMethod(SortByLastUpdated, 12014, LABEL_MASKS("%L", "%v", "%L", "%v"),
                  SortAttributeIgnoreFolders, SortOrderDescending);
  }
  else
  {
    AddSortMethod(SortByLabel, SortAttributeIgnoreFolders, 551, LABEL_MASKS("%L", "%s", "%L", "%s"));
    if (StringUtils::StartsWith(items.GetPath(), "addons://sources/"))
      AddSortMethod(SortByLastUsed, 12012, LABEL_MASKS("%L", "%u", "%L", "%u"),
                    SortAttributeIgnoreFolders, SortOrderDescending); //Label, Last used
    if (StringUtils::StartsWith(items.GetPath(), "addons://user/") && items.GetContent() == "addons")
      AddSortMethod(SortByInstallDate, 12013, LABEL_MASKS("%L", "%i", "%L", "%i"),
                    SortAttributeIgnoreFolders, SortOrderDescending);
    SetSortMethod(SortByLabel);
  }

  SetViewAsControl(DEFAULT_VIEW_AUTO);

  // Restore any previously saved view settings for this window/path.
  LoadViewState(items.GetPath(), WINDOW_ADDON_BROWSER);
}

// Persists the current view settings for this path in the view database.
void CGUIViewStateAddonBrowser::SaveViewState()
{
  SaveViewToDb(m_items.GetPath(), WINDOW_ADDON_BROWSER);
}

// Add-on listings have no file-extension filter.
std::string CGUIViewStateAddonBrowser::GetExtensions()
{
  return "";
}
gpl-2.0
IntelBUAP/Repo-Linux-RT
drivers/power/test_power.c
571
14541
/* * Power supply driver for testing. * * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com> * * Dynamic module parameter code from the Virtual Battery Driver * Copyright (C) 2008 Pylone, Inc. * By: Masashi YOKOTA <yokota@pylone.jp> * Originally found here: * http://downloads.pylone.jp/src/virtual_battery/virtual_battery-0.0.1.tar.bz2 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/power_supply.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/vermagic.h> enum test_power_id { TEST_AC, TEST_BATTERY, TEST_USB, TEST_POWER_NUM, }; static int ac_online = 1; static int usb_online = 1; static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING; static int battery_health = POWER_SUPPLY_HEALTH_GOOD; static int battery_present = 1; /* true */ static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION; static int battery_capacity = 50; static int battery_voltage = 3300; static bool module_initialized; static int test_power_get_ac_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = ac_online; break; default: return -EINVAL; } return 0; } static int test_power_get_usb_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = usb_online; break; default: return -EINVAL; } return 0; } static int test_power_get_battery_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = "Test battery"; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = "Linux"; break; case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = UTS_RELEASE; break; 
case POWER_SUPPLY_PROP_STATUS: val->intval = battery_status; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = battery_health; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = battery_present; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = battery_technology; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; case POWER_SUPPLY_PROP_CAPACITY: case POWER_SUPPLY_PROP_CHARGE_NOW: val->intval = battery_capacity; break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_CHARGE_FULL: val->intval = 100; break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: val->intval = 3600; break; case POWER_SUPPLY_PROP_TEMP: val->intval = 26; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = battery_voltage; break; default: pr_info("%s: some properties deliberately report errors.\n", __func__); return -EINVAL; } return 0; } static enum power_supply_property test_power_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static enum power_supply_property test_power_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_SERIAL_NUMBER, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_VOLTAGE_NOW, }; static char *test_power_ac_supplied_to[] = { "test_battery", }; static struct power_supply *test_power_supplies[TEST_POWER_NUM]; static const struct power_supply_desc test_power_desc[] = { [TEST_AC] = { .name = "test_ac", .type = POWER_SUPPLY_TYPE_MAINS, .properties = test_power_ac_props, 
.num_properties = ARRAY_SIZE(test_power_ac_props), .get_property = test_power_get_ac_property, }, [TEST_BATTERY] = { .name = "test_battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = test_power_battery_props, .num_properties = ARRAY_SIZE(test_power_battery_props), .get_property = test_power_get_battery_property, }, [TEST_USB] = { .name = "test_usb", .type = POWER_SUPPLY_TYPE_USB, .properties = test_power_ac_props, .num_properties = ARRAY_SIZE(test_power_ac_props), .get_property = test_power_get_usb_property, }, }; static const struct power_supply_config test_power_configs[] = { { /* test_ac */ .supplied_to = test_power_ac_supplied_to, .num_supplicants = ARRAY_SIZE(test_power_ac_supplied_to), }, { /* test_battery */ }, { /* test_usb */ .supplied_to = test_power_ac_supplied_to, .num_supplicants = ARRAY_SIZE(test_power_ac_supplied_to), }, }; static int __init test_power_init(void) { int i; int ret; BUILD_BUG_ON(TEST_POWER_NUM != ARRAY_SIZE(test_power_supplies)); BUILD_BUG_ON(TEST_POWER_NUM != ARRAY_SIZE(test_power_configs)); for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) { test_power_supplies[i] = power_supply_register(NULL, &test_power_desc[i], &test_power_configs[i]); if (IS_ERR(test_power_supplies[i])) { pr_err("%s: failed to register %s\n", __func__, test_power_desc[i].name); ret = PTR_ERR(test_power_supplies[i]); goto failed; } } module_initialized = true; return 0; failed: while (--i >= 0) power_supply_unregister(test_power_supplies[i]); return ret; } module_init(test_power_init); static void __exit test_power_exit(void) { int i; /* Let's see how we handle changes... 
*/ ac_online = 0; usb_online = 0; battery_status = POWER_SUPPLY_STATUS_DISCHARGING; for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) power_supply_changed(test_power_supplies[i]); pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n", __func__); ssleep(10); for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) power_supply_unregister(test_power_supplies[i]); module_initialized = false; } module_exit(test_power_exit); #define MAX_KEYLENGTH 256 struct battery_property_map { int value; char const *key; }; static struct battery_property_map map_ac_online[] = { { 0, "off" }, { 1, "on" }, { -1, NULL }, }; static struct battery_property_map map_status[] = { { POWER_SUPPLY_STATUS_CHARGING, "charging" }, { POWER_SUPPLY_STATUS_DISCHARGING, "discharging" }, { POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" }, { POWER_SUPPLY_STATUS_FULL, "full" }, { -1, NULL }, }; static struct battery_property_map map_health[] = { { POWER_SUPPLY_HEALTH_GOOD, "good" }, { POWER_SUPPLY_HEALTH_OVERHEAT, "overheat" }, { POWER_SUPPLY_HEALTH_DEAD, "dead" }, { POWER_SUPPLY_HEALTH_OVERVOLTAGE, "overvoltage" }, { POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, "failure" }, { -1, NULL }, }; static struct battery_property_map map_present[] = { { 0, "false" }, { 1, "true" }, { -1, NULL }, }; static struct battery_property_map map_technology[] = { { POWER_SUPPLY_TECHNOLOGY_NiMH, "NiMH" }, { POWER_SUPPLY_TECHNOLOGY_LION, "LION" }, { POWER_SUPPLY_TECHNOLOGY_LIPO, "LIPO" }, { POWER_SUPPLY_TECHNOLOGY_LiFe, "LiFe" }, { POWER_SUPPLY_TECHNOLOGY_NiCd, "NiCd" }, { POWER_SUPPLY_TECHNOLOGY_LiMn, "LiMn" }, { -1, NULL }, }; static int map_get_value(struct battery_property_map *map, const char *key, int def_val) { char buf[MAX_KEYLENGTH]; int cr; strncpy(buf, key, MAX_KEYLENGTH); buf[MAX_KEYLENGTH-1] = '\0'; cr = strnlen(buf, MAX_KEYLENGTH) - 1; if (cr < 0) return def_val; if (buf[cr] == '\n') buf[cr] = '\0'; while (map->key) { if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0) return map->value; map++; } return 
def_val; } static const char *map_get_key(struct battery_property_map *map, int value, const char *def_key) { while (map->key) { if (map->value == value) return map->key; map++; } return def_key; } static inline void signal_power_supply_changed(struct power_supply *psy) { if (module_initialized) power_supply_changed(psy); } static int param_set_ac_online(const char *key, const struct kernel_param *kp) { ac_online = map_get_value(map_ac_online, key, ac_online); signal_power_supply_changed(test_power_supplies[TEST_AC]); return 0; } static int param_get_ac_online(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown")); return strlen(buffer); } static int param_set_usb_online(const char *key, const struct kernel_param *kp) { usb_online = map_get_value(map_ac_online, key, usb_online); signal_power_supply_changed(test_power_supplies[TEST_USB]); return 0; } static int param_get_usb_online(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown")); return strlen(buffer); } static int param_set_battery_status(const char *key, const struct kernel_param *kp) { battery_status = map_get_value(map_status, key, battery_status); signal_power_supply_changed(test_power_supplies[TEST_BATTERY]); return 0; } static int param_get_battery_status(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_status, battery_status, "unknown")); return strlen(buffer); } static int param_set_battery_health(const char *key, const struct kernel_param *kp) { battery_health = map_get_value(map_health, key, battery_health); signal_power_supply_changed(test_power_supplies[TEST_BATTERY]); return 0; } static int param_get_battery_health(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_health, battery_health, "unknown")); return strlen(buffer); } static int param_set_battery_present(const char *key, const struct kernel_param *kp) { battery_present 
= map_get_value(map_present, key, battery_present); signal_power_supply_changed(test_power_supplies[TEST_AC]); return 0; } static int param_get_battery_present(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_present, battery_present, "unknown")); return strlen(buffer); } static int param_set_battery_technology(const char *key, const struct kernel_param *kp) { battery_technology = map_get_value(map_technology, key, battery_technology); signal_power_supply_changed(test_power_supplies[TEST_BATTERY]); return 0; } static int param_get_battery_technology(char *buffer, const struct kernel_param *kp) { strcpy(buffer, map_get_key(map_technology, battery_technology, "unknown")); return strlen(buffer); } static int param_set_battery_capacity(const char *key, const struct kernel_param *kp) { int tmp; if (1 != sscanf(key, "%d", &tmp)) return -EINVAL; battery_capacity = tmp; signal_power_supply_changed(test_power_supplies[TEST_BATTERY]); return 0; } #define param_get_battery_capacity param_get_int static int param_set_battery_voltage(const char *key, const struct kernel_param *kp) { int tmp; if (1 != sscanf(key, "%d", &tmp)) return -EINVAL; battery_voltage = tmp; signal_power_supply_changed(test_power_supplies[TEST_BATTERY]); return 0; } #define param_get_battery_voltage param_get_int static const struct kernel_param_ops param_ops_ac_online = { .set = param_set_ac_online, .get = param_get_ac_online, }; static const struct kernel_param_ops param_ops_usb_online = { .set = param_set_usb_online, .get = param_get_usb_online, }; static const struct kernel_param_ops param_ops_battery_status = { .set = param_set_battery_status, .get = param_get_battery_status, }; static const struct kernel_param_ops param_ops_battery_present = { .set = param_set_battery_present, .get = param_get_battery_present, }; static const struct kernel_param_ops param_ops_battery_technology = { .set = param_set_battery_technology, .get = param_get_battery_technology, }; static const 
struct kernel_param_ops param_ops_battery_health = { .set = param_set_battery_health, .get = param_get_battery_health, }; static const struct kernel_param_ops param_ops_battery_capacity = { .set = param_set_battery_capacity, .get = param_get_battery_capacity, }; static const struct kernel_param_ops param_ops_battery_voltage = { .set = param_set_battery_voltage, .get = param_get_battery_voltage, }; #define param_check_ac_online(name, p) __param_check(name, p, void); #define param_check_usb_online(name, p) __param_check(name, p, void); #define param_check_battery_status(name, p) __param_check(name, p, void); #define param_check_battery_present(name, p) __param_check(name, p, void); #define param_check_battery_technology(name, p) __param_check(name, p, void); #define param_check_battery_health(name, p) __param_check(name, p, void); #define param_check_battery_capacity(name, p) __param_check(name, p, void); #define param_check_battery_voltage(name, p) __param_check(name, p, void); module_param(ac_online, ac_online, 0644); MODULE_PARM_DESC(ac_online, "AC charging state <on|off>"); module_param(usb_online, usb_online, 0644); MODULE_PARM_DESC(usb_online, "USB charging state <on|off>"); module_param(battery_status, battery_status, 0644); MODULE_PARM_DESC(battery_status, "battery status <charging|discharging|not-charging|full>"); module_param(battery_present, battery_present, 0644); MODULE_PARM_DESC(battery_present, "battery presence state <good|overheat|dead|overvoltage|failure>"); module_param(battery_technology, battery_technology, 0644); MODULE_PARM_DESC(battery_technology, "battery technology <NiMH|LION|LIPO|LiFe|NiCd|LiMn>"); module_param(battery_health, battery_health, 0644); MODULE_PARM_DESC(battery_health, "battery health state <good|overheat|dead|overvoltage|failure>"); module_param(battery_capacity, battery_capacity, 0644); MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)"); module_param(battery_voltage, battery_voltage, 0644); 
MODULE_PARM_DESC(battery_voltage, "battery voltage (millivolts)"); MODULE_DESCRIPTION("Power supply driver for testing"); MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
kularny/GeniSys.Kernel
drivers/net/gianfar_ptp.c
571
16576
/*
 * PTP 1588 clock using the eTSEC
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/device.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/timex.h>
#include <linux/io.h>
#include <linux/ptp_clock_kernel.h>

#include "gianfar.h"

/*
 * gianfar ptp registers
 * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
 *
 * Memory-mapped register layout of the eTSEC 1588 timer block; all
 * accesses go through gfar_read()/gfar_write().
 */
struct gianfar_ptp_registers {
	u32 tmr_ctrl;     /* Timer control register */
	u32 tmr_tevent;   /* Timer event register */
	u32 tmr_temask;   /* Timer event mask register */
	u32 tmr_pevent;   /* Timestamp event register */
	u32 tmr_pemask;   /* Timestamp event mask register */
	u32 tmr_stat;     /* Timestamp status register */
	u32 tmr_cnt_h;    /* Timer counter high register */
	u32 tmr_cnt_l;    /* Timer counter low register */
	u32 tmr_add;      /* Timer drift compensation addend register */
	u32 tmr_acc;      /* Timer accumulator register */
	u32 tmr_prsc;     /* Timer prescale */
	u8  res1[4];
	u32 tmroff_h;     /* Timer offset high */
	u32 tmroff_l;     /* Timer offset low */
	u8  res2[8];
	u32 tmr_alarm1_h; /* Timer alarm 1 high register */
	u32 tmr_alarm1_l; /* Timer alarm 1 low register */
	u32 tmr_alarm2_h; /* Timer alarm 2 high register */
	u32 tmr_alarm2_l; /* Timer alarm 2 low register */
	u8  res3[48];
	u32 tmr_fiper1;   /* Timer fixed period interval */
	u32 tmr_fiper2;   /* Timer fixed period interval */
	u32 tmr_fiper3;   /* Timer fixed period interval */
	u8  res4[20];
	u32 tmr_etts1_h;  /* Timestamp of general purpose external trigger */
	u32 tmr_etts1_l;  /* Timestamp of general purpose external trigger */
	u32 tmr_etts2_h;  /* Timestamp of general purpose external trigger */
	u32 tmr_etts2_l;  /* Timestamp of general purpose external trigger */
};

/* Bit definitions for the TMR_CTRL register */
#define ALM1P                 (1<<31) /* Alarm1 output polarity */
#define ALM2P                 (1<<30) /* Alarm2 output polarity */
#define FS                    (1<<28) /* FIPER start indication */
#define PP1L                  (1<<27) /* Fiper1 pulse loopback mode enabled. */
#define PP2L                  (1<<26) /* Fiper2 pulse loopback mode enabled. */
#define TCLK_PERIOD_SHIFT     (16) /* 1588 timer reference clock period. */
#define TCLK_PERIOD_MASK      (0x3ff)
#define RTPE                  (1<<15) /* Record Tx Timestamp to PAL Enable. */
#define FRD                   (1<<14) /* FIPER Realignment Disable */
#define ESFDP                 (1<<11) /* External Tx/Rx SFD Polarity. */
#define ESFDE                 (1<<10) /* External Tx/Rx SFD Enable. */
#define ETEP2                 (1<<9)  /* External trigger 2 edge polarity */
#define ETEP1                 (1<<8)  /* External trigger 1 edge polarity */
#define COPH                  (1<<7)  /* Generated clock output phase. */
#define CIPH                  (1<<6)  /* External oscillator input clock phase */
#define TMSR                  (1<<5)  /* Timer soft reset. */
#define BYP                   (1<<3)  /* Bypass drift compensated clock */
#define TE                    (1<<2)  /* 1588 timer enable. */
#define CKSEL_SHIFT           (0)     /* 1588 Timer reference clock source */
#define CKSEL_MASK            (0x3)

/* Bit definitions for the TMR_TEVENT register */
#define ETS2                  (1<<25) /* External trigger 2 timestamp sampled */
#define ETS1                  (1<<24) /* External trigger 1 timestamp sampled */
#define ALM2                  (1<<17) /* Current time = alarm time register 2 */
#define ALM1                  (1<<16) /* Current time = alarm time register 1 */
#define PP1                   (1<<7)  /* periodic pulse generated on FIPER1 */
#define PP2                   (1<<6)  /* periodic pulse generated on FIPER2 */
#define PP3                   (1<<5)  /* periodic pulse generated on FIPER3 */

/* Bit definitions for the TMR_TEMASK register */
#define ETS2EN                (1<<25) /* External trigger 2 timestamp enable */
#define ETS1EN                (1<<24) /* External trigger 1 timestamp enable */
#define ALM2EN                (1<<17) /* Timer ALM2 event enable */
#define ALM1EN                (1<<16) /* Timer ALM1 event enable */
#define PP1EN                 (1<<7)  /* Periodic pulse event 1 enable */
#define PP2EN                 (1<<6)  /* Periodic pulse event 2 enable */

/* Bit definitions for the TMR_PEVENT register */
#define TXP2                  (1<<9)  /* PTP transmitted timestamp im TXTS2 */
#define TXP1                  (1<<8)  /* PTP transmitted timestamp in TXTS1 */
#define RXP                   (1<<0)  /* PTP frame has been received */

/* Bit definitions for the TMR_PEMASK register */
#define TXP2EN                (1<<9)  /* Transmit PTP packet event 2 enable */
#define TXP1EN                (1<<8)  /* Transmit PTP packet event 1 enable */
#define RXPEN                 (1<<0)  /* Receive PTP packet event enable */

/* Bit definitions for the TMR_STAT register */
#define STAT_VEC_SHIFT        (0)     /* Timer general purpose status vector */
#define STAT_VEC_MASK         (0x3f)

/* Bit definitions for the TMR_PRSC register */
#define PRSC_OCK_SHIFT        (0)     /* Output clock division/prescale factor. */
#define PRSC_OCK_MASK         (0xffff)

#define DRIVER		"gianfar_ptp"
#define DEFAULT_CKSEL	1
#define N_ALARM		1 /* first alarm is used internally to reset fipers */
#define N_EXT_TS	2
#define REG_SIZE	sizeof(struct gianfar_ptp_registers)

/* Per-device driver state tying the PTP clock to the eTSEC timer block. */
struct etsects {
	struct gianfar_ptp_registers *regs;
	spinlock_t lock; /* protects regs */
	struct ptp_clock *clock;
	struct ptp_clock_info caps;
	struct resource *rsrc;
	int irq;
	u64 alarm_interval; /* for periodic alarm */
	u64 alarm_value;
	u32 tclk_period;  /* nanoseconds */
	u32 tmr_prsc;
	u32 tmr_add;
	u32 cksel;
	u32 tmr_fiper1;
	u32 tmr_fiper2;
};

/*
 * Register access functions
 */

/*
 * Read the 64 bit timer counter.  Low word is read first; presumably
 * reading the low register latches the high half atomically in
 * hardware -- TODO confirm against the eTSEC reference manual.
 * Caller must hold etsects->lock.
 */
static u64 tmr_cnt_read(struct etsects *etsects)
{
	u64 ns;
	u32 lo, hi;

	lo = gfar_read(&etsects->regs->tmr_cnt_l);
	hi = gfar_read(&etsects->regs->tmr_cnt_h);
	ns = ((u64) hi) << 32;
	ns |= lo;
	return ns;
}

/*
 * Write the 64 bit timer counter (low word first).
 * Caller must hold etsects->lock.
 */
static void tmr_cnt_write(struct etsects *etsects, u64 ns)
{
	u32 hi = ns >> 32;
	u32 lo = ns & 0xffffffff;

	gfar_write(&etsects->regs->tmr_cnt_l, lo);
	gfar_write(&etsects->regs->tmr_cnt_h, hi);
}

/*
 * Program alarm 1 to fire just before the next whole-second boundary
 * at least 1.5 s away: round up to a 1 s multiple, then back off one
 * timer clock period.  Caller must hold etsects->lock.
 */
static void set_alarm(struct etsects *etsects)
{
	u64 ns;
	u32 lo, hi;

	ns = tmr_cnt_read(etsects) + 1500000000ULL;
	ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
	ns -= etsects->tclk_period;
	hi = ns >> 32;
	lo = ns & 0xffffffff;
	gfar_write(&etsects->regs->tmr_alarm1_l, lo);
	gfar_write(&etsects->regs->tmr_alarm1_h, hi);
}

/*
 * Re-arm the internal alarm and reload both fixed period interval
 * registers.  Caller must hold etsects->lock.
 */
static void set_fipers(struct etsects *etsects)
{
	set_alarm(etsects);
	gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
	gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
}

/*
 * Interrupt service routine
 *
 * Decodes TMR_TEVENT, forwards external-trigger timestamps, alarm and
 * PPS events to the PTP clock layer, and acknowledges everything it
 * handled by writing the bits back to TMR_TEVENT.
 */

static irqreturn_t isr(int irq, void *priv)
{
	struct etsects *etsects = priv;
	struct ptp_clock_event event;
	u64 ns;
	u32 ack = 0, lo, hi, mask, val;

	val = gfar_read(&etsects->regs->tmr_tevent);

	if (val & ETS1) {
		ack |= ETS1;
		hi = gfar_read(&etsects->regs->tmr_etts1_h);
		lo = gfar_read(&etsects->regs->tmr_etts1_l);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = ((u64) hi) << 32;
		event.timestamp |= lo;
		ptp_clock_event(etsects->clock, &event);
	}

	if (val & ETS2) {
		ack |= ETS2;
		hi = gfar_read(&etsects->regs->tmr_etts2_h);
		lo = gfar_read(&etsects->regs->tmr_etts2_l);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = ((u64) hi) << 32;
		event.timestamp |= lo;
		ptp_clock_event(etsects->clock, &event);
	}

	if (val & ALM2) {
		ack |= ALM2;
		if (etsects->alarm_value) {
			event.type = PTP_CLOCK_ALARM;
			event.index = 0;
			event.timestamp = etsects->alarm_value;
			ptp_clock_event(etsects->clock, &event);
		}
		if (etsects->alarm_interval) {
			/* Periodic alarm: re-arm ALM2 one interval later. */
			ns = etsects->alarm_value + etsects->alarm_interval;
			hi = ns >> 32;
			lo = ns & 0xffffffff;
			spin_lock(&etsects->lock);
			gfar_write(&etsects->regs->tmr_alarm2_l, lo);
			gfar_write(&etsects->regs->tmr_alarm2_h, hi);
			spin_unlock(&etsects->lock);
			etsects->alarm_value = ns;
		} else {
			/* One-shot alarm: ack and mask ALM2 off. */
			gfar_write(&etsects->regs->tmr_tevent, ALM2);
			spin_lock(&etsects->lock);
			mask = gfar_read(&etsects->regs->tmr_temask);
			mask &= ~ALM2EN;
			gfar_write(&etsects->regs->tmr_temask, mask);
			spin_unlock(&etsects->lock);
			etsects->alarm_value = 0;
			etsects->alarm_interval = 0;
		}
	}

	if (val & PP1) {
		ack |= PP1;
		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(etsects->clock, &event);
	}

	if (ack) {
		gfar_write(&etsects->regs->tmr_tevent, ack);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

/*
 * PTP clock operations
 */

/*
 * Adjust the clock frequency by ppb (parts per billion) by scaling the
 * drift compensation addend: diff = tmr_add * |ppb| / 1e9, added or
 * subtracted depending on the sign of ppb.
 */
static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	u64 adj;
	u32 diff, tmr_add;
	int neg_adj = 0;
	struct etsects *etsects = container_of(ptp, struct etsects, caps);

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	tmr_add = etsects->tmr_add;
	adj = tmr_add;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);

	tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;

	gfar_write(&etsects->regs->tmr_add, tmr_add);

	return 0;
}

/*
 * Step the clock by delta nanoseconds, then realign alarm and fipers.
 * NOTE(review): set_fipers() is called after the lock is dropped here,
 * unlike in ptp_gianfar_settime() where it runs under the lock --
 * verify this ordering is intentional.
 */
static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	s64 now;
	unsigned long flags;
	struct etsects *etsects = container_of(ptp, struct etsects, caps);

	spin_lock_irqsave(&etsects->lock, flags);

	now = tmr_cnt_read(etsects);
	now += delta;
	tmr_cnt_write(etsects, now);

	spin_unlock_irqrestore(&etsects->lock, flags);

	set_fipers(etsects);

	return 0;
}

/* Read the current hardware time into a timespec. */
static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	unsigned long flags;
	struct etsects *etsects = container_of(ptp, struct etsects, caps);

	spin_lock_irqsave(&etsects->lock, flags);

	ns = tmr_cnt_read(etsects);

	spin_unlock_irqrestore(&etsects->lock, flags);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;
	return 0;
}

/* Set the hardware time from a timespec and realign alarm and fipers. */
static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
			       const struct timespec *ts)
{
	u64 ns;
	unsigned long flags;
	struct etsects *etsects = container_of(ptp, struct etsects, caps);

	ns = ts->tv_sec * 1000000000ULL;
	ns += ts->tv_nsec;

	spin_lock_irqsave(&etsects->lock, flags);

	tmr_cnt_write(etsects, ns);
	set_fipers(etsects);

	spin_unlock_irqrestore(&etsects->lock, flags);

	return 0;
}

/*
 * Enable or disable an ancillary feature: external timestamp channels
 * (ETS1/ETS2) or the PPS event (driven by FIPER1 via PP1).
 */
static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq, int on)
{
	struct etsects *etsects = container_of(ptp, struct etsects, caps);
	unsigned long flags;
	u32 bit, mask;

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		switch (rq->extts.index) {
		case 0:
			bit = ETS1EN;
			break;
		case 1:
			bit = ETS2EN;
			break;
		default:
			return -EINVAL;
		}
		spin_lock_irqsave(&etsects->lock, flags);
		mask = gfar_read(&etsects->regs->tmr_temask);
		if (on)
			mask |= bit;
		else
			mask &= ~bit;
		gfar_write(&etsects->regs->tmr_temask, mask);
		spin_unlock_irqrestore(&etsects->lock, flags);
		return 0;

	case PTP_CLK_REQ_PPS:
		spin_lock_irqsave(&etsects->lock, flags);
		mask = gfar_read(&etsects->regs->tmr_temask);
		if (on)
			mask |= PP1EN;
		else
			mask &= ~PP1EN;
		gfar_write(&etsects->regs->tmr_temask, mask);
		spin_unlock_irqrestore(&etsects->lock, flags);
		return 0;

	default:
		break;
	}

	return -EOPNOTSUPP;
}

/* Capabilities template; max_adj is overwritten from the device tree. */
static struct ptp_clock_info ptp_gianfar_caps = {
	.owner		= THIS_MODULE,
	.name		= "gianfar clock",
	.max_adj	= 512000,
	.n_alarm	= N_ALARM,
	.n_ext_ts	= N_EXT_TS,
	.n_per_out	= 0,
	.pps		= 1,
	.adjfreq	= ptp_gianfar_adjfreq,
	.adjtime	= ptp_gianfar_adjtime,
	.gettime	= ptp_gianfar_gettime,
	.settime	= ptp_gianfar_settime,
	.enable		= ptp_gianfar_enable,
};

/* OF device tree */

/*
 * Fetch a single u32 property named @str from @node into @val.
 * Returns 0 on success, -1 if the property is missing or mis-sized.
 */
static int get_of_u32(struct device_node *node, char *str, u32 *val)
{
	int plen;
	const u32 *prop = of_get_property(node, str, &plen);

	if (!prop || plen != sizeof(*prop))
		return -1;
	*val = *prop;
	return 0;
}

/*
 * Probe: read timer parameters from the device tree, claim IRQ and
 * MMIO resources, initialize the hardware and register the PTP clock.
 * Error paths unwind in reverse acquisition order via the labels below.
 */
static int gianfar_ptp_probe(struct platform_device *dev)
{
	struct device_node *node = dev->dev.of_node;
	struct etsects *etsects;
	struct timespec now;
	int err = -ENOMEM;
	u32 tmr_ctrl;
	unsigned long flags;

	etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
	if (!etsects)
		goto no_memory;

	err = -ENODEV;

	etsects->caps = ptp_gianfar_caps;
	etsects->cksel = DEFAULT_CKSEL;

	if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
	    get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
	    get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
	    get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
	    get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
	    get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
		pr_err("device tree node missing required elements\n");
		goto no_node;
	}

	etsects->irq = platform_get_irq(dev, 0);

	if (etsects->irq == NO_IRQ) {
		pr_err("irq not in device tree\n");
		goto no_node;
	}
	if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
		pr_err("request_irq failed\n");
		goto no_node;
	}

	etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!etsects->rsrc) {
		pr_err("no resource\n");
		goto no_resource;
	}
	if (request_resource(&ioport_resource, etsects->rsrc)) {
		pr_err("resource busy\n");
		goto no_resource;
	}

	/* Lock must be usable before ptp_gianfar_settime() below. */
	spin_lock_init(&etsects->lock);

	etsects->regs = ioremap(etsects->rsrc->start,
				1 + etsects->rsrc->end - etsects->rsrc->start);
	if (!etsects->regs) {
		pr_err("ioremap ptp registers failed\n");
		goto no_ioremap;
	}
	getnstimeofday(&now);
	ptp_gianfar_settime(&etsects->caps, &now);

	tmr_ctrl =
	  (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
	  (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;

	spin_lock_irqsave(&etsects->lock, flags);

	gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
	gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
	gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
	gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
	gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
	set_alarm(etsects);
	/* Final write starts the timer: FIPER start, Tx record, enable. */
	gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);

	spin_unlock_irqrestore(&etsects->lock, flags);

	etsects->clock = ptp_clock_register(&etsects->caps);
	if (IS_ERR(etsects->clock)) {
		err = PTR_ERR(etsects->clock);
		goto no_clock;
	}

	dev_set_drvdata(&dev->dev, etsects);

	return 0;

no_clock:
	iounmap(etsects->regs);
no_ioremap:
	release_resource(etsects->rsrc);
no_resource:
	free_irq(etsects->irq, etsects);
no_node:
	kfree(etsects);
no_memory:
	return err;
}

/*
 * Remove: mask and stop the timer, then tear down in reverse order of
 * acquisition in probe().
 */
static int gianfar_ptp_remove(struct platform_device *dev)
{
	struct etsects *etsects = dev_get_drvdata(&dev->dev);

	gfar_write(&etsects->regs->tmr_temask, 0);
	gfar_write(&etsects->regs->tmr_ctrl,   0);

	ptp_clock_unregister(etsects->clock);
	iounmap(etsects->regs);
	release_resource(etsects->rsrc);
	free_irq(etsects->irq, etsects);
	kfree(etsects);

	return 0;
}

static struct of_device_id match_table[] = {
	{ .compatible = "fsl,etsec-ptp" },
	{},
};

static struct platform_driver gianfar_ptp_driver = {
	.driver = {
		.name		= "gianfar_ptp",
		.of_match_table	= match_table,
		.owner		= THIS_MODULE,
	},
	.probe       = gianfar_ptp_probe,
	.remove      = gianfar_ptp_remove,
};

/* module operations */

static int __init ptp_gianfar_init(void)
{
	return platform_driver_register(&gianfar_ptp_driver);
}

module_init(ptp_gianfar_init);

static void __exit ptp_gianfar_exit(void)
{
	platform_driver_unregister(&gianfar_ptp_driver);
}

module_exit(ptp_gianfar_exit);

MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
MODULE_LICENSE("GPL");
gpl-2.0
cricard13/linux-raspberry-nfc
drivers/uwb/rsv.c
827
28037
/*
 * UWB reservation management.
 *
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/uwb.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/export.h>

#include "uwb-internal.h"

static void uwb_rsv_timer(unsigned long arg);

/* Fixed-width state names for debug output (indexed by uwb_rsv_state). */
static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE]                 = "none            ",
	[UWB_RSV_STATE_O_INITIATED]          = "o initiated     ",
	[UWB_RSV_STATE_O_PENDING]            = "o pending       ",
	[UWB_RSV_STATE_O_MODIFIED]           = "o modified      ",
	[UWB_RSV_STATE_O_ESTABLISHED]        = "o established   ",
	[UWB_RSV_STATE_O_TO_BE_MOVED]        = "o to be moved   ",
	[UWB_RSV_STATE_O_MOVE_EXPANDING]     = "o move expanding",
	[UWB_RSV_STATE_O_MOVE_COMBINING]     = "o move combining",
	[UWB_RSV_STATE_O_MOVE_REDUCING]      = "o move reducing ",
	[UWB_RSV_STATE_T_ACCEPTED]           = "t accepted      ",
	[UWB_RSV_STATE_T_CONFLICT]           = "t conflict      ",
	[UWB_RSV_STATE_T_PENDING]            = "t pending       ",
	[UWB_RSV_STATE_T_DENIED]             = "t denied        ",
	[UWB_RSV_STATE_T_RESIZED]            = "t resized       ",
	[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
	[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
	[UWB_RSV_STATE_T_EXPANDING_PENDING]  = "t expanding pend",
	[UWB_RSV_STATE_T_EXPANDING_DENIED]   = "t expanding den ",
};

/* DRP reservation type names (indexed by uwb_drp_type). */
static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD]     = "hard",
	[UWB_DRP_TYPE_SOFT]     = "soft",
	[UWB_DRP_TYPE_PRIVATE]  = "private",
	[UWB_DRP_TYPE_PCA]      = "pca",
};

/*
 * True for states where the beacon must carry two DRP IEs (the main
 * reservation plus the companion used while expanding).
 */
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
{
	static const bool has_two_drp_ies[] = {
		[UWB_RSV_STATE_O_INITIATED]          = false,
		[UWB_RSV_STATE_O_PENDING]            = false,
		[UWB_RSV_STATE_O_MODIFIED]           = false,
		[UWB_RSV_STATE_O_ESTABLISHED]        = false,
		[UWB_RSV_STATE_O_TO_BE_MOVED]        = false,
		[UWB_RSV_STATE_O_MOVE_COMBINING]     = false,
		[UWB_RSV_STATE_O_MOVE_REDUCING]      = false,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = true,
		[UWB_RSV_STATE_T_ACCEPTED]           = false,
		[UWB_RSV_STATE_T_CONFLICT]           = false,
		[UWB_RSV_STATE_T_PENDING]            = false,
		[UWB_RSV_STATE_T_DENIED]             = false,
		[UWB_RSV_STATE_T_RESIZED]            = false,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = true,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = true,
	};

	return has_two_drp_ies[rsv->state];
}

/**
 * uwb_rsv_state_str - return a string for a reservation state
 * @state: the reservation state.
 */
const char *uwb_rsv_state_str(enum uwb_rsv_state state)
{
	if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
		return "unknown";
	return rsv_states[state];
}
EXPORT_SYMBOL_GPL(uwb_rsv_state_str);

/**
 * uwb_rsv_type_str - return a string for a reservation type
 * @type: the reservation type
 */
const char *uwb_rsv_type_str(enum uwb_drp_type type)
{
	if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
		return "invalid";
	return rsv_types[type];
}
EXPORT_SYMBOL_GPL(uwb_rsv_type_str);

/* Emit a one-line debug dump: "rsv <text> <owner> -> <target>: <state>". */
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
{
	struct device *dev = &rsv->rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "rsv %s %s -> %s: %s\n",
		text, owner, target, uwb_rsv_state_str(rsv->state));
}

/* kref release callback: frees the reservation once the last ref drops. */
static void uwb_rsv_release(struct kref *kref)
{
	struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref);

	kfree(rsv);
}

/* Take a reference on a reservation. */
void uwb_rsv_get(struct uwb_rsv *rsv)
{
	kref_get(&rsv->kref);
}

/* Drop a reference on a reservation; frees it when the count hits zero. */
void uwb_rsv_put(struct uwb_rsv *rsv)
{
	kref_put(&rsv->kref, uwb_rsv_release);
}

/*
 * Get a free stream index for a reservation.
 *
 * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
 * the stream is allocated from a pool of per-RC stream indexes,
 * otherwise a unique stream index for the target is selected.
 */
static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;
	int stream;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return -EINVAL;
	}

	stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
	if (stream >= UWB_NUM_STREAMS) {
		dev_err(dev, "%s: no available stream found\n", __func__);
		return -EBUSY;
	}

	rsv->stream = stream;
	set_bit(stream, streams_bm);

	dev_dbg(dev, "get stream %d\n", rsv->stream);

	return 0;
}

/* Return a reservation's stream index to the bitmap it came from. */
static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return;
	}

	clear_bit(rsv->stream, streams_bm);

	dev_dbg(dev, "put stream %d\n", rsv->stream);
}

/*
 * Backoff window timer: re-enables extra-MAS reservations and, after
 * the window has expired 4 times, resets the backoff procedure.  Then
 * retries all "to be moved" relocations.
 */
void uwb_rsv_backoff_win_timer(unsigned long arg)
{
	struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
	struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
	struct device *dev = &rc->uwb_dev.dev;

	bow->can_reserve_extra_mases = true;
	if (bow->total_expired <= 4) {
		bow->total_expired++;
	} else {
		/* after 4 backoff window has expired we can exit from
		 * the backoff procedure */
		bow->total_expired = 0;
		bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	}
	dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n",
		bow->total_expired, bow->n);

	/* try to relocate all the "to be moved" relocations */
	uwb_rsv_handle_drp_avail_change(rc);
}

/*
 * Double the DRP backoff window (up to UWB_DRP_BACKOFF_WIN_MAX), pick a
 * random slot count n within it, and restart the backoff timer.  While
 * backing off, extra-MAS reservations are disallowed.
 */
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned timeout_us;

	dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);

	bow->can_reserve_extra_mases = false;

	if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
		return;

	bow->window <<= 1;
	bow->n = prandom_u32() & (bow->window - 1);
	dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n);

	/* reset the timer associated variables */
	timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
	bow->total_expired = 0;
	mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}

/*
 * Re-arm (or cancel) the per-reservation timeout timer based on the
 * current state; sframes == 0 means no timeout is needed.
 */
static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	/*
	 * Multicast reservations can become established within 1
	 * super frame and should not be terminated if no response is
	 * received.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		sframes = 0;
	} else if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;
	}

	if (sframes > 0) {
		/*
		 * Add an additional 2 superframes to account for the
		 * time to send the SET DRP IE command.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}

/*
 * Update a reservations state, and schedule an update of the
 * transmitted DRP IEs.
 */
static void uwb_rsv_state_update(struct uwb_rsv *rsv,
				 enum uwb_rsv_state new_state)
{
	rsv->state = new_state;
	rsv->ie_valid = false;

	uwb_rsv_dump("SU", rsv);

	uwb_rsv_stroke_timer(rsv);
	uwb_rsv_sched_update(rsv->rc);
}

/* Invoke the owner's state-change callback, if one is registered. */
static void uwb_rsv_callback(struct uwb_rsv *rsv)
{
	if (rsv->callback)
		rsv->callback(rsv);
}

/*
 * Central reservation state machine: transition @rsv to @new_state,
 * performing the MAS bookkeeping (reserve/release of companion MAS,
 * bitmap merges) each transition requires.  A transition to the same
 * state only strokes the timer for stable states.
 */
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_O_MOVE_EXPANDING:
		case UWB_RSV_STATE_O_MOVE_COMBINING:
		case UWB_RSV_STATE_O_MOVE_REDUCING:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		case UWB_RSV_STATE_T_RESIZED:
		case UWB_RSV_STATE_NONE:
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Expecting a state transition so leave timer
			   as-is. */
			break;
		}
		return;
	}

	uwb_rsv_dump("SC", rsv);

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_remove(rsv);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		/* in the companion there are the MASes to drop */
		bitmap_andnot(rsv->mas.bm, rsv->mas.bm,
			      mv->companion_mas.bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->state == UWB_RSV_STATE_O_MODIFIED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
			rsv->needs_release_companion_mas = false;
		}
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		rsv->needs_release_companion_mas = true;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		/* Merge the companion MAS into the main reservation. */
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		bitmap_or(rsv->mas.bm, rsv->mas.bm,
			  mv->companion_mas.bm, UWB_NUM_MAS);
		rsv->mas.safe   += mv->companion_mas.safe;
		rsv->mas.unsafe += mv->companion_mas.unsafe;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		/* Companion now holds the MAS to be dropped later. */
		bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
			      mv->final_mas.bm, UWB_NUM_MAS);
		rsv->needs_release_companion_mas = true;
		rsv->mas.safe   = mv->final_mas.safe;
		rsv->mas.unsafe = mv->final_mas.unsafe;
		bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm,
			    UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	case UWB_RSV_STATE_T_CONFLICT:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
		break;
	case UWB_RSV_STATE_T_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		rsv->needs_release_companion_mas = true;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}

/*
 * Timeout work: advance multicast owner reservations through their
 * move sequence, drop a timed-out companion DRP, or remove the
 * reservation entirely.  Runs under rc->rsvs_mutex.
 */
static void uwb_rsv_handle_timeout_work(struct work_struct *work)
{
	struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
					   handle_timeout_work);
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	uwb_rsv_dump("TO", rsv);

	switch (rsv->state) {
	case UWB_RSV_STATE_O_INITIATED:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		if (rsv->is_multicast)
			goto unlock;
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		/*
		 * The time out could be for the main or of the
		 * companion DRP, assume it's for the companion and
		 * drop that first.  A further time out is required to
		 * drop the main.
		 */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
		goto unlock;
	case UWB_RSV_STATE_NONE:
		goto unlock;
	default:
		break;
	}

	uwb_rsv_remove(rsv);

unlock:
	mutex_unlock(&rc->rsvs_mutex);
}

/*
 * Allocate and initialize a reservation: lists, kref, timeout timer
 * and timeout work item.  Returns NULL on allocation failure.
 */
static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv;

	rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
	if (!rsv)
		return NULL;

	INIT_LIST_HEAD(&rsv->rc_node);
	INIT_LIST_HEAD(&rsv->pal_node);
	kref_init(&rsv->kref);
	init_timer(&rsv->timer);
	rsv->timer.function = uwb_rsv_timer;
	rsv->timer.data     = (unsigned long)rsv;

	rsv->rc = rc;
	INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);

	return rsv;
}

/**
 * uwb_rsv_create - allocate and initialize a UWB reservation structure
 * @rc: the radio controller
 * @cb: callback to use when the reservation completes or terminates
 * @pal_priv: data private to the PAL to be passed in the callback
 *
 * The callback is called when the state of the reservation changes from:
 *
 *   - pending to accepted
 *   - pending to denined
 *   - accepted to terminated
 *   - pending to terminated
 */
struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
			       void *pal_priv)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;

	return rsv;
}
EXPORT_SYMBOL_GPL(uwb_rsv_create);

/*
 * Tear down a reservation: force it to NONE, release all MAS (main and
 * any pending companion), return the stream index, drop device refs,
 * unlink it from the RC's list and drop the list's reference.
 */
void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	uwb_rsv_dump("RM", rsv);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	if (rsv->needs_release_companion_mas)
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
	uwb_drp_avail_release(rsv->rc, &rsv->mas);

	if (uwb_rsv_is_owner(rsv))
		uwb_rsv_put_stream(rsv);

	uwb_dev_put(rsv->owner);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		uwb_dev_put(rsv->target.dev);

	list_del_init(&rsv->rc_node);
	uwb_rsv_put(rsv);
}

/**
 * uwb_rsv_destroy - free a UWB reservation structure
 * @rsv: the reservation to free
 *
 * The reservation must already be terminated.
 */
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	uwb_rsv_put(rsv);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);

/**
 * usb_rsv_establish - start a reservation establishment
 * @rsv: the reservation
 *
 * The PAL should fill in @rsv's owner, target, type, max_mas,
 * min_mas, max_interval and is_multicast fields.  If the target is a
 * uwb_dev it must be referenced.
 *
 * The reservation's callback will be called when the reservation is
 * accepted, denied or times out.
 */
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_mas_bm available;
	struct device *dev = &rc->uwb_dev.dev;
	int ret;

	mutex_lock(&rc->rsvs_mutex);
	ret = uwb_rsv_get_stream(rsv);
	if (ret) {
		dev_err(dev, "%s: uwb_rsv_get_stream failed: %d\n",
			__func__, ret);
		goto out;
	}

	rsv->tiebreaker = prandom_u32() & 1;
	/* get available mas bitmap */
	uwb_drp_available(rc, &available);

	ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
	if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
		ret = -EBUSY;
		uwb_rsv_put_stream(rsv);
		dev_err(dev, "%s: uwb_rsv_find_best_allocation failed: %d\n",
			__func__, ret);
		goto out;
	}

	ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
	if (ret != 0) {
		uwb_rsv_put_stream(rsv);
		dev_err(dev, "%s: uwb_drp_avail_reserve_pending failed: %d\n",
			__func__, ret);
		goto out;
	}

	uwb_rsv_get(rsv);
	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;
	uwb_dev_get(rsv->owner);
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);

/**
 * uwb_rsv_modify - modify an already established reservation
 * @rsv: the reservation to modify
 * @max_mas: new maximum MAS to reserve
 * @min_mas: new minimum MAS to reserve
 * @max_interval: new max_interval to use
 *
 * FIXME: implement this once there are PALs that use it.
 */
int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas,
		   int max_interval)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);

/*
 * move an already established reservation (rc->rsvs_mutex must to be
 * taken when tis function is called)
 *
 * NOTE(review): ret is never set to a non-zero value after the backoff
 * check, so a failed allocation still returns 0 -- callers currently
 * ignore the return value, but confirm before relying on it.
 */
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv;
	int ret = 0;

	if (bow->can_reserve_extra_mases == false)
		return -EBUSY;

	mv = &rsv->mv;

	if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas)
	    == UWB_RSV_ALLOC_FOUND) {

		if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm,
				  UWB_NUM_MAS)) {
			/* We want to move the reservation */
			bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm,
				      rsv->mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
	} else {
		dev_dbg(dev, "new allocation not found\n");
	}

	return ret;
}

/* It will try to move every reservation in state O_ESTABLISHED giving
 * to the MAS allocator algorithm an availability that is the real one
 * plus the allocation already established from the reservation. */
void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv *rsv;
	struct uwb_mas_bm mas;

	if (bow->can_reserve_extra_mases == false)
		return;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
		    rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
			uwb_drp_available(rc, &mas);
			bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_rsv_try_move(rsv, &mas);
		}
	}
}

/**
 * uwb_rsv_terminate - terminate an established reservation
 * @rsv: the reservation to terminate
 *
 * A reservation is terminated by removing the DRP IE from the beacon,
 * the other end will consider the reservation to be terminated when
 * it does not see the DRP IE for at least mMaxLostBeacons.
 *
 * If applicable, the reference to the target uwb_dev will be released.
 */
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);

/**
 * uwb_rsv_accept - accept a new reservation from a peer
 * @rsv:      the reservation
 * @cb:       call back for reservation changes
 * @pal_priv: data to be passed in the above call back
 *
 * Reservation requests from peers are denied unless a PAL accepts it
 * by calling this function.
 *
 * The PAL call uwb_rsv_destroy() for all accepted reservations before
 * calling uwb_pal_unregister().
 */
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	uwb_rsv_get(rsv);

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	rsv->state    = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);

/*
 * Is a received DRP IE for this reservation?
 *
 * Matches first on stream index, then (for device targets) on the
 * source address against either the owner or the target depending on
 * who sent the IE.
 */
static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
			  struct uwb_ie_drp *drp_ie)
{
	struct uwb_dev_addr *rsv_src;
	int stream;

	stream = uwb_ie_drp_stream_index(drp_ie);

	if (rsv->stream != stream)
		return false;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEVADDR:
		return rsv->stream == stream;
	case UWB_RSV_TARGET_DEV:
		if (uwb_ie_drp_owner(drp_ie))
			rsv_src = &rsv->owner->dev_addr;
		else
			rsv_src = &rsv->target.dev->dev_addr;
		return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
	}
	return false;
}

/*
 * Build a reservation for a peer-initiated DRP IE targeting us, offer
 * it to each registered PAL, and accept or deny based on the outcome.
 */
static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
					  struct uwb_dev *src,
					  struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;
	struct uwb_pal *pal;
	enum uwb_rsv_state state;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->rc          = rc;
	rsv->owner       = src;
	uwb_dev_get(rsv->owner);
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev  = &rc->uwb_dev;
	uwb_dev_get(&rc->uwb_dev);
	rsv->type        = uwb_ie_drp_type(drp_ie);
	rsv->stream      = uwb_ie_drp_stream_index(drp_ie);
	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);

	/*
	 * See if any PALs are interested in this reservation. If not,
	 * deny the request.
	 */
	rsv->state = UWB_RSV_STATE_T_DENIED;
	mutex_lock(&rc->uwb_dev.mutex);
	list_for_each_entry(pal, &rc->pals, node) {
		if (pal->new_rsv)
			pal->new_rsv(pal, rsv);
		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
			break;
	}
	mutex_unlock(&rc->uwb_dev.mutex);

	list_add_tail(&rsv->rc_node, &rc->reservations);
	state = rsv->state;
	rsv->state = UWB_RSV_STATE_NONE;

	/* FIXME: do something sensible here */
	if (state == UWB_RSV_STATE_T_ACCEPTED
	    && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
		/* FIXME: do something sensible here */
	} else {
		uwb_rsv_set_state(rsv, state);
	}

	return rsv;
}

/**
 * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservations
 * @rsv: the reservation.
 * @mas: returns the available MAS.
 *
 * The usable MAS of a reservation may be less than the negotiated MAS
 * if alien BPs are present.
 */
void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
{
	bitmap_zero(mas->bm, UWB_NUM_MAS);
	bitmap_andnot(mas->bm, rsv->mas.bm,
		      rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
}
EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);

/**
 * uwb_rsv_find - find a reservation for a received DRP IE.
 * @rc: the radio controller
 * @src: source of the DRP IE
 * @drp_ie: the DRP IE
 *
 * If the reservation cannot be found and the DRP IE is from a peer
 * attempting to establish a new reservation, create a new reservation
 * and add it to the list.
 */
struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
			     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (uwb_rsv_match(rsv, src, drp_ie))
			return rsv;
	}
	if (uwb_ie_drp_owner(drp_ie))
		return uwb_rsv_new_target(rc, src, drp_ie);

	return NULL;
}

/*
 * Go through all the reservations and check for timeouts and (if
 * necessary) update their DRP IEs.
 *
 * FIXME: look at building the SET_DRP_IE command here rather than
 * having to rescan the list in uwb_rc_send_all_drp_ie().
 */
static bool uwb_rsv_update_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;
	bool ie_updated = false;

	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (!rsv->ie_valid) {
			uwb_drp_ie_update(rsv);
			ie_updated = true;
		}
	}

	return ie_updated;
}

/* Queue the delayed DRP-IE update work one zone's worth of time out. */
void uwb_rsv_queue_update(struct uwb_rc *rc)
{
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work,
			   usecs_to_jiffies(delay_us));
}

/**
 * uwb_rsv_sched_update - schedule an update of the DRP IEs
 * @rc: the radio controller.
 *
 * To improve performance and ensure correctness with [ECMA-368] the
 * number of SET-DRP-IE commands that are done are limited.
 *
 * DRP IEs update come from two sources: DRP events from the hardware
 * which all occur at the beginning of the superframe ('syncronous'
 * events) and reservation establishment/termination requests from
 * PALs or timers ('asynchronous' events).
 *
 * A delayed work ensures that all the synchronous events result in
 * one SET-DRP-IE command.
 *
 * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
 * flags) will prevent an asynchrous event starting a SET-DRP-IE
 * command if one is currently awaiting a response.
 *
 * FIXME: this does leave a window where an asynchrous event can delay
 * the SET-DRP-IE for a synchronous event by one superframe.
 */
void uwb_rsv_sched_update(struct uwb_rc *rc)
{
	spin_lock_irq(&rc->rsvs_lock);
	if (!delayed_work_pending(&rc->rsv_update_work)) {
		if (rc->set_drp_ie_pending > 0) {
			/* Postpone: a SET-DRP-IE is already in flight. */
			rc->set_drp_ie_pending++;
			goto unlock;
		}
		uwb_rsv_queue_update(rc);
	}
unlock:
	spin_unlock_irq(&rc->rsvs_lock);
}

/*
 * Update DRP IEs and, if necessary, the DRP Availability IE and send
 * the updated IEs to the radio controller.
*/ static void uwb_rsv_update_work(struct work_struct *work) { struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work.work); bool ie_updated; mutex_lock(&rc->rsvs_mutex); ie_updated = uwb_rsv_update_all(rc); if (!rc->drp_avail.ie_valid) { uwb_drp_avail_ie_update(rc); ie_updated = true; } if (ie_updated && (rc->set_drp_ie_pending == 0)) uwb_rc_send_all_drp_ie(rc); mutex_unlock(&rc->rsvs_mutex); } static void uwb_rsv_alien_bp_work(struct work_struct *work) { struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_alien_bp_work.work); struct uwb_rsv *rsv; mutex_lock(&rc->rsvs_mutex); list_for_each_entry(rsv, &rc->reservations, rc_node) { if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { uwb_rsv_callback(rsv); } } mutex_unlock(&rc->rsvs_mutex); } static void uwb_rsv_timer(unsigned long arg) { struct uwb_rsv *rsv = (struct uwb_rsv *)arg; queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); } /** * uwb_rsv_remove_all - remove all reservations * @rc: the radio controller * * A DRP IE update is not done. */ void uwb_rsv_remove_all(struct uwb_rc *rc) { struct uwb_rsv *rsv, *t; mutex_lock(&rc->rsvs_mutex); list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { if (rsv->state != UWB_RSV_STATE_NONE) uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); del_timer_sync(&rsv->timer); } /* Cancel any postponed update. 
*/ rc->set_drp_ie_pending = 0; mutex_unlock(&rc->rsvs_mutex); cancel_delayed_work_sync(&rc->rsv_update_work); flush_workqueue(rc->rsv_workq); mutex_lock(&rc->rsvs_mutex); list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { uwb_rsv_remove(rsv); } mutex_unlock(&rc->rsvs_mutex); } void uwb_rsv_init(struct uwb_rc *rc) { INIT_LIST_HEAD(&rc->reservations); INIT_LIST_HEAD(&rc->cnflt_alien_list); mutex_init(&rc->rsvs_mutex); spin_lock_init(&rc->rsvs_lock); INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); rc->bow.can_reserve_extra_mases = true; rc->bow.total_expired = 0; rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; init_timer(&rc->bow.timer); rc->bow.timer.function = uwb_rsv_backoff_win_timer; rc->bow.timer.data = (unsigned long)&rc->bow; bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); } int uwb_rsv_setup(struct uwb_rc *rc) { char name[16]; snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev)); rc->rsv_workq = create_singlethread_workqueue(name); if (rc->rsv_workq == NULL) return -ENOMEM; return 0; } void uwb_rsv_cleanup(struct uwb_rc *rc) { uwb_rsv_remove_all(rc); destroy_workqueue(rc->rsv_workq); }
gpl-2.0
CPA-Poke/LiquidKernel-Shamu
drivers/block/paride/pf.c
2619
24889
/* pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. This is the high-level driver for parallel port ATAPI disk drives based on chips supported by the paride module. By default, the driver will autoprobe for a single parallel port ATAPI disk drive, but if their individual parameters are specified, the driver can handle up to 4 drives. The behaviour of the pf driver can be altered by setting some parameters from the insmod command line. The following parameters are adjustable: drive0 These four arguments can be arrays of drive1 1-7 integers as follows: drive2 drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly> Where, <prt> is the base of the parallel port address for the corresponding drive. (required) <pro> is the protocol number for the adapter that supports this drive. These numbers are logged by 'paride' when the protocol modules are initialised. (0 if not given) <uni> for those adapters that support chained devices, this is the unit selector for the chain of devices on the given port. It should be zero for devices that don't support chaining. (0 if not given) <mod> this can be -1 to choose the best mode, or one of the mode numbers supported by the adapter. (-1 if not given) <slv> ATAPI CDroms can be jumpered to master or slave. Set this to 0 to choose the master drive, 1 to choose the slave, -1 (the default) to choose the first drive found. <lun> Some ATAPI devices support multiple LUNs. One example is the ATAPI PD/CD drive from Matshita/Panasonic. This device has a CD drive on LUN 0 and a PD drive on LUN 1. By default, the driver will search for the first LUN with a supported device. Set this parameter to force it to use a specific LUN. (default -1) <dly> some parallel ports require the driver to go more slowly. -1 sets a default value that should work with the chosen protocol. Otherwise, set this to a small integer, the larger it is the slower the port i/o. 
In some cases, setting this to zero will speed up the device. (default -1) major You may use this parameter to overide the default major number (47) that this driver will use. Be sure to change the device name as well. name This parameter is a character string that contains the name the kernel will use for this device (in /proc output, for instance). (default "pf"). cluster The driver will attempt to aggregate requests for adjacent blocks into larger multi-block clusters. The maximum cluster size (in 512 byte sectors) is set with this parameter. (default 64) verbose This parameter controls the amount of logging that the driver will do. Set it to 0 for normal operation, 1 to see autoprobe progress messages, or 2 to see additional debugging output. (default 0) nice This parameter controls the driver's use of idle CPU time, at the expense of some speed. If this driver is built into the kernel, you can use the following command line parameters, with the same values as the corresponding module parameters listed above: pf.drive0 pf.drive1 pf.drive2 pf.drive3 pf.cluster pf.nice In addition, you can use the parameter pf.disable to disable the driver entirely. */ /* Changes: 1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti(). Fix for drives that don't clear STAT_ERR until after next CDB delivered. Small change in pf_completion to round up transfer size. 1.02 GRG 1998.06.16 Eliminated an Ugh 1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging 1.04 GRG 1998.09.24 Added jumbo support */ #define PF_VERSION "1.04" #define PF_MAJOR 47 #define PF_NAME "pf" #define PF_UNITS 4 #include <linux/types.h> /* Here are things one can override from the insmod command. Most are autoprobed by paride unless set here. Verbose is off by default. 
*/ static bool verbose = 0; static int major = PF_MAJOR; static char *name = PF_NAME; static int cluster = 64; static int nice = 0; static int disable = 0; static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 }; static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3}; static int pf_drive_count; enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY}; /* end of parameters */ #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/cdrom.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/mutex.h> #include <asm/uaccess.h> static DEFINE_MUTEX(pf_mutex); static DEFINE_SPINLOCK(pf_spin_lock); module_param(verbose, bool, 0644); module_param(major, int, 0); module_param(name, charp, 0); module_param(cluster, int, 0); module_param(nice, int, 0); module_param_array(drive0, int, NULL, 0); module_param_array(drive1, int, NULL, 0); module_param_array(drive2, int, NULL, 0); module_param_array(drive3, int, NULL, 0); #include "paride.h" #include "pseudo.h" /* constants for faking geometry numbers */ #define PF_FD_MAX 8192 /* use FD geometry under this size */ #define PF_FD_HDS 2 #define PF_FD_SPT 18 #define PF_HD_HDS 64 #define PF_HD_SPT 32 #define PF_MAX_RETRIES 5 #define PF_TMO 800 /* interrupt timeout in jiffies */ #define PF_SPIN_DEL 50 /* spin delay in micro-seconds */ #define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL) #define STAT_ERR 0x00001 #define STAT_INDEX 0x00002 #define STAT_ECC 0x00004 #define STAT_DRQ 0x00008 #define STAT_SEEK 0x00010 #define STAT_WRERR 0x00020 #define STAT_READY 0x00040 #define STAT_BUSY 0x00080 #define ATAPI_REQ_SENSE 0x03 #define ATAPI_LOCK 0x1e #define ATAPI_DOOR 0x1b #define ATAPI_MODE_SENSE 0x5a #define ATAPI_CAPACITY 0x25 #define ATAPI_IDENTIFY 0x12 #define 
ATAPI_READ_10 0x28 #define ATAPI_WRITE_10 0x2a static int pf_open(struct block_device *bdev, fmode_t mode); static void do_pf_request(struct request_queue * q); static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo); static void pf_release(struct gendisk *disk, fmode_t mode); static int pf_detect(void); static void do_pf_read(void); static void do_pf_read_start(void); static void do_pf_write(void); static void do_pf_write_start(void); static void do_pf_read_drq(void); static void do_pf_write_done(void); #define PF_NM 0 #define PF_RO 1 #define PF_RW 2 #define PF_NAMELEN 8 struct pf_unit { struct pi_adapter pia; /* interface to paride layer */ struct pi_adapter *pi; int removable; /* removable media device ? */ int media_status; /* media present ? WP ? */ int drive; /* drive */ int lun; int access; /* count of active opens ... */ int present; /* device present ? */ char name[PF_NAMELEN]; /* pf0, pf1, ... */ struct gendisk *disk; }; static struct pf_unit units[PF_UNITS]; static int pf_identify(struct pf_unit *pf); static void pf_lock(struct pf_unit *pf, int func); static void pf_eject(struct pf_unit *pf); static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing); static char pf_scratch[512]; /* scratch block buffer */ /* the variables below are used mainly in the I/O request engine, which processes only one request at a time. */ static int pf_retries = 0; /* i/o error retry count */ static int pf_busy = 0; /* request being processed ? 
*/ static struct request *pf_req; /* current request */ static int pf_block; /* address of next requested block */ static int pf_count; /* number of blocks still to do */ static int pf_run; /* sectors in current cluster */ static int pf_cmd; /* current command READ/WRITE */ static struct pf_unit *pf_current;/* unit of current request */ static int pf_mask; /* stopper for pseudo-int */ static char *pf_buf; /* buffer for request in progress */ /* kernel glue structures */ static const struct block_device_operations pf_fops = { .owner = THIS_MODULE, .open = pf_open, .release = pf_release, .ioctl = pf_ioctl, .getgeo = pf_getgeo, .check_events = pf_check_events, }; static void __init pf_init_units(void) { struct pf_unit *pf; int unit; pf_drive_count = 0; for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) { struct gendisk *disk = alloc_disk(1); if (!disk) continue; pf->disk = disk; pf->pi = &pf->pia; pf->media_status = PF_NM; pf->drive = (*drives[unit])[D_SLV]; pf->lun = (*drives[unit])[D_LUN]; snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit); disk->major = major; disk->first_minor = unit; strcpy(disk->disk_name, pf->name); disk->fops = &pf_fops; if (!(*drives[unit])[D_PRT]) pf_drive_count++; } } static int pf_open(struct block_device *bdev, fmode_t mode) { struct pf_unit *pf = bdev->bd_disk->private_data; int ret; mutex_lock(&pf_mutex); pf_identify(pf); ret = -ENODEV; if (pf->media_status == PF_NM) goto out; ret = -EROFS; if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) goto out; ret = 0; pf->access++; if (pf->removable) pf_lock(pf, 1); out: mutex_unlock(&pf_mutex); return ret; } static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct pf_unit *pf = bdev->bd_disk->private_data; sector_t capacity = get_capacity(pf->disk); if (capacity < PF_FD_MAX) { geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT); geo->heads = PF_FD_HDS; geo->sectors = PF_FD_SPT; } else { geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT); 
geo->heads = PF_HD_HDS; geo->sectors = PF_HD_SPT; } return 0; } static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct pf_unit *pf = bdev->bd_disk->private_data; if (cmd != CDROMEJECT) return -EINVAL; if (pf->access != 1) return -EBUSY; mutex_lock(&pf_mutex); pf_eject(pf); mutex_unlock(&pf_mutex); return 0; } static void pf_release(struct gendisk *disk, fmode_t mode) { struct pf_unit *pf = disk->private_data; mutex_lock(&pf_mutex); if (pf->access <= 0) { mutex_unlock(&pf_mutex); WARN_ON(1); return; } pf->access--; if (!pf->access && pf->removable) pf_lock(pf, 0); mutex_unlock(&pf_mutex); } static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing) { return DISK_EVENT_MEDIA_CHANGE; } static inline int status_reg(struct pf_unit *pf) { return pi_read_regr(pf->pi, 1, 6); } static inline int read_reg(struct pf_unit *pf, int reg) { return pi_read_regr(pf->pi, 0, reg); } static inline void write_reg(struct pf_unit *pf, int reg, int val) { pi_write_regr(pf->pi, 0, reg, val); } static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg) { int j, r, e, s, p; j = 0; while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop)))) && (j++ < PF_SPIN)) udelay(PF_SPIN_DEL); if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) { s = read_reg(pf, 7); e = read_reg(pf, 1); p = read_reg(pf, 2); if (j > PF_SPIN) e |= 0x100; if (fun) printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" " loop=%d phase=%d\n", pf->name, fun, msg, r, s, e, j, p); return (e << 8) + s; } return 0; } static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun) { pi_connect(pf->pi); write_reg(pf, 6, 0xa0+0x10*pf->drive); if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) { pi_disconnect(pf->pi); return -1; } write_reg(pf, 4, dlen % 256); write_reg(pf, 5, dlen / 256); write_reg(pf, 7, 0xa0); /* ATAPI packet command */ if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) { pi_disconnect(pf->pi); return 
-1; } if (read_reg(pf, 2) != 1) { printk("%s: %s: command phase error\n", pf->name, fun); pi_disconnect(pf->pi); return -1; } pi_write_block(pf->pi, cmd, 12); return 0; } static int pf_completion(struct pf_unit *pf, char *buf, char *fun) { int r, s, n; r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, fun, "completion"); if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) { n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) + 3) & 0xfffc); pi_read_block(pf->pi, buf, n); } s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done"); pi_disconnect(pf->pi); return (r ? r : s); } static void pf_req_sense(struct pf_unit *pf, int quiet) { char rs_cmd[12] = { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 }; char buf[16]; int r; r = pf_command(pf, rs_cmd, 16, "Request sense"); mdelay(1); if (!r) pf_completion(pf, buf, "Request sense"); if ((!r) && (!quiet)) printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n", pf->name, buf[2] & 0xf, buf[12], buf[13]); } static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun) { int r; r = pf_command(pf, cmd, dlen, fun); mdelay(1); if (!r) r = pf_completion(pf, buf, fun); if (r) pf_req_sense(pf, !fun); return r; } static void pf_lock(struct pf_unit *pf, int func) { char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 }; pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock"); } static void pf_eject(struct pf_unit *pf) { char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 }; pf_lock(pf, 0); pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject"); } #define PF_RESET_TMO 30 /* in tenths of a second */ static void pf_sleep(int cs) { schedule_timeout_interruptible(cs); } /* the ATAPI standard actually specifies the contents of all 7 registers after a reset, but the specification is ambiguous concerning the last two bytes, and different drives interpret the standard differently. 
*/ static int pf_reset(struct pf_unit *pf) { int i, k, flg; int expect[5] = { 1, 1, 1, 0x14, 0xeb }; pi_connect(pf->pi); write_reg(pf, 6, 0xa0+0x10*pf->drive); write_reg(pf, 7, 8); pf_sleep(20 * HZ / 1000); k = 0; while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY)) pf_sleep(HZ / 10); flg = 1; for (i = 0; i < 5; i++) flg &= (read_reg(pf, i + 1) == expect[i]); if (verbose) { printk("%s: Reset (%d) signature = ", pf->name, k); for (i = 0; i < 5; i++) printk("%3x", read_reg(pf, i + 1)); if (!flg) printk(" (incorrect)"); printk("\n"); } pi_disconnect(pf->pi); return flg - 1; } static void pf_mode_sense(struct pf_unit *pf) { char ms_cmd[12] = { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 }; char buf[8]; pf_atapi(pf, ms_cmd, 8, buf, "mode sense"); pf->media_status = PF_RW; if (buf[3] & 0x80) pf->media_status = PF_RO; } static void xs(char *buf, char *targ, int offs, int len) { int j, k, l; j = 0; l = 0; for (k = 0; k < len; k++) if ((buf[k + offs] != 0x20) || (buf[k + offs] != l)) l = targ[j++] = buf[k + offs]; if (l == 0x20) j--; targ[j] = 0; } static int xl(char *buf, int offs) { int v, k; v = 0; for (k = 0; k < 4; k++) v = v * 256 + (buf[k + offs] & 0xff); return v; } static void pf_get_capacity(struct pf_unit *pf) { char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; char buf[8]; int bs; if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) { pf->media_status = PF_NM; return; } set_capacity(pf->disk, xl(buf, 0) + 1); bs = xl(buf, 4); if (bs != 512) { set_capacity(pf->disk, 0); if (verbose) printk("%s: Drive %d, LUN %d," " unsupported block size %d\n", pf->name, pf->drive, pf->lun, bs); } } static int pf_identify(struct pf_unit *pf) { int dt, s; char *ms[2] = { "master", "slave" }; char mf[10], id[18]; char id_cmd[12] = { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; char buf[36]; s = pf_atapi(pf, id_cmd, 36, buf, "identify"); if (s) return -1; dt = buf[0] & 0x1f; if ((dt != 0) && (dt != 7)) { if 
(verbose) printk("%s: Drive %d, LUN %d, unsupported type %d\n", pf->name, pf->drive, pf->lun, dt); return -1; } xs(buf, mf, 8, 8); xs(buf, id, 16, 16); pf->removable = (buf[1] & 0x80); pf_mode_sense(pf); pf_mode_sense(pf); pf_mode_sense(pf); pf_get_capacity(pf); printk("%s: %s %s, %s LUN %d, type %d", pf->name, mf, id, ms[pf->drive], pf->lun, dt); if (pf->removable) printk(", removable"); if (pf->media_status == PF_NM) printk(", no media\n"); else { if (pf->media_status == PF_RO) printk(", RO"); printk(", %llu blocks\n", (unsigned long long)get_capacity(pf->disk)); } return 0; } /* returns 0, with id set if drive is detected -1, if drive detection failed */ static int pf_probe(struct pf_unit *pf) { if (pf->drive == -1) { for (pf->drive = 0; pf->drive <= 1; pf->drive++) if (!pf_reset(pf)) { if (pf->lun != -1) return pf_identify(pf); else for (pf->lun = 0; pf->lun < 8; pf->lun++) if (!pf_identify(pf)) return 0; } } else { if (pf_reset(pf)) return -1; if (pf->lun != -1) return pf_identify(pf); for (pf->lun = 0; pf->lun < 8; pf->lun++) if (!pf_identify(pf)) return 0; } return -1; } static int pf_detect(void) { struct pf_unit *pf = units; int k, unit; printk("%s: %s version %s, major %d, cluster %d, nice %d\n", name, name, PF_VERSION, major, cluster, nice); k = 0; if (pf_drive_count == 0) { if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF, verbose, pf->name)) { if (!pf_probe(pf) && pf->disk) { pf->present = 1; k++; } else pi_release(pf->pi); } } else for (unit = 0; unit < PF_UNITS; unit++, pf++) { int *conf = *drives[unit]; if (!conf[D_PRT]) continue; if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD], conf[D_UNI], conf[D_PRO], conf[D_DLY], pf_scratch, PI_PF, verbose, pf->name)) { if (pf->disk && !pf_probe(pf)) { pf->present = 1; k++; } else pi_release(pf->pi); } } if (k) return 0; printk("%s: No ATAPI disk detected\n", name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) put_disk(pf->disk); return -1; } /* The i/o request engine */ static int 
pf_start(struct pf_unit *pf, int cmd, int b, int c) { int i; char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; for (i = 0; i < 4; i++) { io_cmd[5 - i] = b & 0xff; b = b >> 8; } io_cmd[8] = c & 0xff; io_cmd[7] = (c >> 8) & 0xff; i = pf_command(pf, io_cmd, c * 512, "start i/o"); mdelay(1); return i; } static int pf_ready(void) { return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask)); } static struct request_queue *pf_queue; static void pf_end_request(int err) { if (pf_req && !__blk_end_request_cur(pf_req, err)) pf_req = NULL; } static void do_pf_request(struct request_queue * q) { if (pf_busy) return; repeat: if (!pf_req) { pf_req = blk_fetch_request(q); if (!pf_req) return; } pf_current = pf_req->rq_disk->private_data; pf_block = blk_rq_pos(pf_req); pf_run = blk_rq_sectors(pf_req); pf_count = blk_rq_cur_sectors(pf_req); if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { pf_end_request(-EIO); goto repeat; } pf_cmd = rq_data_dir(pf_req); pf_buf = pf_req->buffer; pf_retries = 0; pf_busy = 1; if (pf_cmd == READ) pi_do_claimed(pf_current->pi, do_pf_read); else if (pf_cmd == WRITE) pi_do_claimed(pf_current->pi, do_pf_write); else { pf_busy = 0; pf_end_request(-EIO); goto repeat; } } static int pf_next_buf(void) { unsigned long saved_flags; pf_count--; pf_run--; pf_buf += 512; pf_block++; if (!pf_run) return 1; if (!pf_count) { spin_lock_irqsave(&pf_spin_lock, saved_flags); pf_end_request(0); spin_unlock_irqrestore(&pf_spin_lock, saved_flags); if (!pf_req) return 1; pf_count = blk_rq_cur_sectors(pf_req); pf_buf = pf_req->buffer; } return 0; } static inline void next_request(int err) { unsigned long saved_flags; spin_lock_irqsave(&pf_spin_lock, saved_flags); pf_end_request(err); pf_busy = 0; do_pf_request(pf_queue); spin_unlock_irqrestore(&pf_spin_lock, saved_flags); } /* detach from the calling context - in case the spinlock is held */ static void do_pf_read(void) { ps_set_intr(do_pf_read_start, NULL, 0, nice); } static void 
do_pf_read_start(void) { pf_busy = 1; if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_read_start); return; } next_request(-EIO); return; } pf_mask = STAT_DRQ; ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice); } static void do_pf_read_drq(void) { while (1) { if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR, "read block", "completion") & STAT_ERR) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_req_sense(pf_current, 0); pf_retries++; pi_do_claimed(pf_current->pi, do_pf_read_start); return; } next_request(-EIO); return; } pi_read_block(pf_current->pi, pf_buf, 512); if (pf_next_buf()) break; } pi_disconnect(pf_current->pi); next_request(0); } static void do_pf_write(void) { ps_set_intr(do_pf_write_start, NULL, 0, nice); } static void do_pf_write_start(void) { pf_busy = 1; if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_write_start); return; } next_request(-EIO); return; } while (1) { if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR, "write block", "data wait") & STAT_ERR) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_write_start); return; } next_request(-EIO); return; } pi_write_block(pf_current->pi, pf_buf, 512); if (pf_next_buf()) break; } pf_mask = 0; ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice); } static void do_pf_write_done(void) { if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) { pi_disconnect(pf_current->pi); if (pf_retries < PF_MAX_RETRIES) { pf_retries++; pi_do_claimed(pf_current->pi, do_pf_write_start); return; } next_request(-EIO); return; } pi_disconnect(pf_current->pi); next_request(0); } static int __init pf_init(void) { /* preliminary initialisation */ 
struct pf_unit *pf; int unit; if (disable) return -EINVAL; pf_init_units(); if (pf_detect()) return -ENODEV; pf_busy = 0; if (register_blkdev(major, name)) { for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) put_disk(pf->disk); return -EBUSY; } pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock); if (!pf_queue) { unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) put_disk(pf->disk); return -ENOMEM; } blk_queue_max_segments(pf_queue, cluster); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { struct gendisk *disk = pf->disk; if (!pf->present) continue; disk->private_data = pf; disk->queue = pf_queue; add_disk(disk); } return 0; } static void __exit pf_exit(void) { struct pf_unit *pf; int unit; unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { if (!pf->present) continue; del_gendisk(pf->disk); put_disk(pf->disk); pi_release(pf->pi); } blk_cleanup_queue(pf_queue); } MODULE_LICENSE("GPL"); module_init(pf_init) module_exit(pf_exit)
gpl-2.0
Supermaster34/3.0-Kernel-Galaxy-Player-US
fs/ext3/fsync.c
2619
2960
/* * linux/fs/ext3/fsync.c * * Copyright (C) 1993 Stephen Tweedie (sct@redhat.com) * from * Copyright (C) 1992 Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * from * linux/fs/minix/truncate.c Copyright (C) 1991, 1992 Linus Torvalds * * ext3fs fsync primitive * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * * Removed unnecessary code duplication for little endian machines * and excessive __inline__s. * Andi Kleen, 1997 * * Major simplications and cleanup - we only need to do the metadata, because * we can depend on generic_block_fdatasync() to sync the data blocks. */ #include <linux/time.h> #include <linux/blkdev.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/writeback.h> #include <linux/jbd.h> #include <linux/ext3_fs.h> #include <linux/ext3_jbd.h> /* * akpm: A new design for ext3_sync_file(). * * This is only called from sys_fsync(), sys_fdatasync() and sys_msync(). * There cannot be a transaction open by this task. * Another task could have dirtied this inode. Its data can be in any * state in the journalling system. * * What we do is just kick off a commit and wait on it. This will snapshot the * inode to disk. */ int ext3_sync_file(struct file *file, int datasync) { struct inode *inode = file->f_mapping->host; struct ext3_inode_info *ei = EXT3_I(inode); journal_t *journal = EXT3_SB(inode->i_sb)->s_journal; int ret, needs_barrier = 0; tid_t commit_tid; if (inode->i_sb->s_flags & MS_RDONLY) return 0; J_ASSERT(ext3_journal_current_handle() == NULL); /* * data=writeback,ordered: * The caller's filemap_fdatawrite()/wait will sync the data. * Metadata is in the journal, we wait for a proper transaction * to commit here. * * data=journal: * filemap_fdatawrite won't do anything (the buffers are clean). * ext3_force_commit will write the file data into the journal and * will wait on that. 
* filemap_fdatawait() will encounter a ton of newly-dirtied pages * (they were dirtied by commit). But that's OK - the blocks are * safe in-journal, which is all fsync() needs to ensure. */ if (ext3_should_journal_data(inode)) return ext3_force_commit(inode->i_sb); if (datasync) commit_tid = atomic_read(&ei->i_datasync_tid); else commit_tid = atomic_read(&ei->i_sync_tid); if (test_opt(inode->i_sb, BARRIER) && !journal_trans_will_send_data_barrier(journal, commit_tid)) needs_barrier = 1; log_start_commit(journal, commit_tid); ret = log_wait_commit(journal, commit_tid); /* * In case we didn't commit a transaction, we have to flush * disk caches manually so that data really is on persistent * storage */ if (needs_barrier) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); return ret; }
gpl-2.0
x942/GuardianKernel-Tuna
drivers/misc/c2port/c2port-duramar2150.c
3131
3241
/* * Silicon Labs C2 port Linux support for Eurotech Duramar 2150 * * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it> * Copyright (c) 2008 Eurotech S.p.A. <info@eurotech.it> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation */ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/c2port.h> #define DATA_PORT 0x325 #define DIR_PORT 0x326 #define C2D (1 << 0) #define C2CK (1 << 1) static DEFINE_MUTEX(update_lock); /* * C2 port operations */ static void duramar2150_c2port_access(struct c2port_device *dev, int status) { u8 v; mutex_lock(&update_lock); v = inb(DIR_PORT); /* 0 = input, 1 = output */ if (status) outb(v | (C2D | C2CK), DIR_PORT); else /* When access is "off" is important that both lines are set * as inputs or hi-impedance */ outb(v & ~(C2D | C2CK), DIR_PORT); mutex_unlock(&update_lock); } static void duramar2150_c2port_c2d_dir(struct c2port_device *dev, int dir) { u8 v; mutex_lock(&update_lock); v = inb(DIR_PORT); if (dir) outb(v & ~C2D, DIR_PORT); else outb(v | C2D, DIR_PORT); mutex_unlock(&update_lock); } static int duramar2150_c2port_c2d_get(struct c2port_device *dev) { return inb(DATA_PORT) & C2D; } static void duramar2150_c2port_c2d_set(struct c2port_device *dev, int status) { u8 v; mutex_lock(&update_lock); v = inb(DATA_PORT); if (status) outb(v | C2D, DATA_PORT); else outb(v & ~C2D, DATA_PORT); mutex_unlock(&update_lock); } static void duramar2150_c2port_c2ck_set(struct c2port_device *dev, int status) { u8 v; mutex_lock(&update_lock); v = inb(DATA_PORT); if (status) outb(v | C2CK, DATA_PORT); else outb(v & ~C2CK, DATA_PORT); mutex_unlock(&update_lock); } static struct c2port_ops duramar2150_c2port_ops = { .block_size = 512, /* bytes */ .blocks_num = 30, /* total flash size: 15360 bytes */ .access = 
duramar2150_c2port_access, .c2d_dir = duramar2150_c2port_c2d_dir, .c2d_get = duramar2150_c2port_c2d_get, .c2d_set = duramar2150_c2port_c2d_set, .c2ck_set = duramar2150_c2port_c2ck_set, }; static struct c2port_device *duramar2150_c2port_dev; /* * Module stuff */ static int __init duramar2150_c2port_init(void) { struct resource *res; int ret = 0; res = request_region(0x325, 2, "c2port"); if (!res) return -EBUSY; duramar2150_c2port_dev = c2port_device_register("uc", &duramar2150_c2port_ops, NULL); if (!duramar2150_c2port_dev) { ret = -ENODEV; goto free_region; } return 0; free_region: release_region(0x325, 2); return ret; } static void __exit duramar2150_c2port_exit(void) { /* Setup the GPIOs as input by default (access = 0) */ duramar2150_c2port_access(duramar2150_c2port_dev, 0); c2port_device_unregister(duramar2150_c2port_dev); release_region(0x325, 2); } module_init(duramar2150_c2port_init); module_exit(duramar2150_c2port_exit); MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); MODULE_DESCRIPTION("Silicon Labs C2 port Linux support for Duramar 2150"); MODULE_LICENSE("GPL");
gpl-2.0
Milad1993/linux
drivers/media/usb/gspca/etoms.c
4411
22572
/* * Etoms Et61x151 GPL Linux driver by Michel Xhaard (09/09/2004) * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "etoms" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("Etoms USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ unsigned char autogain; char sensor; #define SENSOR_PAS106 0 #define SENSOR_TAS5130CXX 1 signed char ag_cnt; #define AG_CNT_START 13 }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, /* {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, */ }; static const struct v4l2_pix_format sif_mode[] = { {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; #define ETOMS_ALT_SIZE_1000 12 #define ET_GPIO_DIR_CTRL 0x04 /* Control IO bit[0..5] (0 in 1 out) */ #define ET_GPIO_OUT 0x05 /* Only IO data */ #define ET_GPIO_IN 0x06 /* Read Only IO data */ #define ET_RESET_ALL 0x03 #define ET_ClCK 0x01 #define ET_CTRL 0x02 /* enable i2c OutClck Powerdown mode */ #define ET_COMP 0x12 /* Compression register */ #define ET_MAXQt 0x13 #define ET_MINQt 0x14 #define ET_COMP_VAL0 0x02 #define ET_COMP_VAL1 0x03 #define ET_REG1d 0x1d #define ET_REG1e 0x1e #define ET_REG1f 0x1f #define ET_REG20 0x20 #define ET_REG21 0x21 #define ET_REG22 0x22 #define ET_REG23 0x23 #define ET_REG24 0x24 #define ET_REG25 0x25 /* base registers for luma calculation */ #define ET_LUMA_CENTER 0x39 #define ET_G_RED 0x4d #define ET_G_GREEN1 0x4e #define ET_G_BLUE 0x4f #define ET_G_GREEN2 0x50 #define ET_G_GR_H 0x51 #define ET_G_GB_H 0x52 #define ET_O_RED 0x34 #define ET_O_GREEN1 0x35 #define ET_O_BLUE 0x36 #define ET_O_GREEN2 0x37 #define ET_SYNCHRO 0x68 #define ET_STARTX 0x69 #define ET_STARTY 0x6a #define ET_WIDTH_LOW 0x6b #define ET_HEIGTH_LOW 0x6c #define ET_W_H_HEIGTH 0x6d #define ET_REG6e 0x6e /* OBW */ #define ET_REG6f 0x6f /* 
OBW */ #define ET_REG70 0x70 /* OBW_AWB */ #define ET_REG71 0x71 /* OBW_AWB */ #define ET_REG72 0x72 /* OBW_AWB */ #define ET_REG73 0x73 /* Clkdelay ns */ #define ET_REG74 0x74 /* test pattern */ #define ET_REG75 0x75 /* test pattern */ #define ET_I2C_CLK 0x8c #define ET_PXL_CLK 0x60 #define ET_I2C_BASE 0x89 #define ET_I2C_COUNT 0x8a #define ET_I2C_PREFETCH 0x8b #define ET_I2C_REG 0x88 #define ET_I2C_DATA7 0x87 #define ET_I2C_DATA6 0x86 #define ET_I2C_DATA5 0x85 #define ET_I2C_DATA4 0x84 #define ET_I2C_DATA3 0x83 #define ET_I2C_DATA2 0x82 #define ET_I2C_DATA1 0x81 #define ET_I2C_DATA0 0x80 #define PAS106_REG2 0x02 /* pxlClk = systemClk/(reg2) */ #define PAS106_REG3 0x03 /* line/frame H [11..4] */ #define PAS106_REG4 0x04 /* line/frame L [3..0] */ #define PAS106_REG5 0x05 /* exposure time line offset(default 5) */ #define PAS106_REG6 0x06 /* exposure time pixel offset(default 6) */ #define PAS106_REG7 0x07 /* signbit Dac (default 0) */ #define PAS106_REG9 0x09 #define PAS106_REG0e 0x0e /* global gain [4..0](default 0x0e) */ #define PAS106_REG13 0x13 /* end i2c write */ static const __u8 GainRGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 }; static const __u8 I2c2[] = { 0x08, 0x08, 0x08, 0x08, 0x0d }; static const __u8 I2c3[] = { 0x12, 0x05 }; static const __u8 I2c4[] = { 0x41, 0x08 }; /* read 'len' bytes to gspca_dev->usb_buf */ static void reg_r(struct gspca_dev *gspca_dev, __u16 index, __u16 len) { struct usb_device *dev = gspca_dev->dev; if (len > USB_BUF_SZ) { PERR("reg_r: buffer overflow\n"); return; } usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, index, gspca_dev->usb_buf, len, 500); PDEBUG(D_USBI, "reg read [%02x] -> %02x ..", index, gspca_dev->usb_buf[0]); } static void reg_w_val(struct gspca_dev *gspca_dev, __u16 index, __u8 val) { struct usb_device *dev = gspca_dev->dev; gspca_dev->usb_buf[0] = val; usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 
0, index, gspca_dev->usb_buf, 1, 500); } static void reg_w(struct gspca_dev *gspca_dev, __u16 index, const __u8 *buffer, __u16 len) { struct usb_device *dev = gspca_dev->dev; if (len > USB_BUF_SZ) { pr_err("reg_w: buffer overflow\n"); return; } PDEBUG(D_USBO, "reg write [%02x] = %02x..", index, *buffer); memcpy(gspca_dev->usb_buf, buffer, len); usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, index, gspca_dev->usb_buf, len, 500); } static int i2c_w(struct gspca_dev *gspca_dev, __u8 reg, const __u8 *buffer, int len, __u8 mode) { /* buffer should be [D0..D7] */ __u8 ptchcount; /* set the base address */ reg_w_val(gspca_dev, ET_I2C_BASE, 0x40); /* sensor base for the pas106 */ /* set count and prefetch */ ptchcount = ((len & 0x07) << 4) | (mode & 0x03); reg_w_val(gspca_dev, ET_I2C_COUNT, ptchcount); /* set the register base */ reg_w_val(gspca_dev, ET_I2C_REG, reg); while (--len >= 0) reg_w_val(gspca_dev, ET_I2C_DATA0 + len, buffer[len]); return 0; } static int i2c_r(struct gspca_dev *gspca_dev, __u8 reg) { /* set the base address */ reg_w_val(gspca_dev, ET_I2C_BASE, 0x40); /* sensor base for the pas106 */ /* set count and prefetch (cnd: 4 bits - mode: 4 bits) */ reg_w_val(gspca_dev, ET_I2C_COUNT, 0x11); reg_w_val(gspca_dev, ET_I2C_REG, reg); /* set the register base */ reg_w_val(gspca_dev, ET_I2C_PREFETCH, 0x02); /* prefetch */ reg_w_val(gspca_dev, ET_I2C_PREFETCH, 0x00); reg_r(gspca_dev, ET_I2C_DATA0, 1); /* read one byte */ return 0; } static int Et_WaitStatus(struct gspca_dev *gspca_dev) { int retry = 10; while (retry--) { reg_r(gspca_dev, ET_ClCK, 1); if (gspca_dev->usb_buf[0] != 0) return 1; } return 0; } static int et_video(struct gspca_dev *gspca_dev, int on) { int ret; reg_w_val(gspca_dev, ET_GPIO_OUT, on ? 
0x10 /* startvideo - set Bit5 */ : 0); /* stopvideo */ ret = Et_WaitStatus(gspca_dev); if (ret != 0) PERR("timeout video on/off"); return ret; } static void Et_init2(struct gspca_dev *gspca_dev) { __u8 value; static const __u8 FormLine[] = { 0x84, 0x03, 0x14, 0xf4, 0x01, 0x05 }; PDEBUG(D_STREAM, "Open Init2 ET"); reg_w_val(gspca_dev, ET_GPIO_DIR_CTRL, 0x2f); reg_w_val(gspca_dev, ET_GPIO_OUT, 0x10); reg_r(gspca_dev, ET_GPIO_IN, 1); reg_w_val(gspca_dev, ET_ClCK, 0x14); /* 0x14 // 0x16 enabled pattern */ reg_w_val(gspca_dev, ET_CTRL, 0x1b); /* compression et subsampling */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) value = ET_COMP_VAL1; /* 320 */ else value = ET_COMP_VAL0; /* 640 */ reg_w_val(gspca_dev, ET_COMP, value); reg_w_val(gspca_dev, ET_MAXQt, 0x1f); reg_w_val(gspca_dev, ET_MINQt, 0x04); /* undocumented registers */ reg_w_val(gspca_dev, ET_REG1d, 0xff); reg_w_val(gspca_dev, ET_REG1e, 0xff); reg_w_val(gspca_dev, ET_REG1f, 0xff); reg_w_val(gspca_dev, ET_REG20, 0x35); reg_w_val(gspca_dev, ET_REG21, 0x01); reg_w_val(gspca_dev, ET_REG22, 0x00); reg_w_val(gspca_dev, ET_REG23, 0xff); reg_w_val(gspca_dev, ET_REG24, 0xff); reg_w_val(gspca_dev, ET_REG25, 0x0f); /* colors setting */ reg_w_val(gspca_dev, 0x30, 0x11); /* 0x30 */ reg_w_val(gspca_dev, 0x31, 0x40); reg_w_val(gspca_dev, 0x32, 0x00); reg_w_val(gspca_dev, ET_O_RED, 0x00); /* 0x34 */ reg_w_val(gspca_dev, ET_O_GREEN1, 0x00); reg_w_val(gspca_dev, ET_O_BLUE, 0x00); reg_w_val(gspca_dev, ET_O_GREEN2, 0x00); /*************/ reg_w_val(gspca_dev, ET_G_RED, 0x80); /* 0x4d */ reg_w_val(gspca_dev, ET_G_GREEN1, 0x80); reg_w_val(gspca_dev, ET_G_BLUE, 0x80); reg_w_val(gspca_dev, ET_G_GREEN2, 0x80); reg_w_val(gspca_dev, ET_G_GR_H, 0x00); reg_w_val(gspca_dev, ET_G_GB_H, 0x00); /* 0x52 */ /* Window control registers */ reg_w_val(gspca_dev, 0x61, 0x80); /* use cmc_out */ reg_w_val(gspca_dev, 0x62, 0x02); reg_w_val(gspca_dev, 0x63, 0x03); reg_w_val(gspca_dev, 0x64, 0x14); reg_w_val(gspca_dev, 0x65, 0x0e); 
reg_w_val(gspca_dev, 0x66, 0x02); reg_w_val(gspca_dev, 0x67, 0x02); /**************************************/ reg_w_val(gspca_dev, ET_SYNCHRO, 0x8f); /* 0x68 */ reg_w_val(gspca_dev, ET_STARTX, 0x69); /* 0x6a //0x69 */ reg_w_val(gspca_dev, ET_STARTY, 0x0d); /* 0x0d //0x0c */ reg_w_val(gspca_dev, ET_WIDTH_LOW, 0x80); reg_w_val(gspca_dev, ET_HEIGTH_LOW, 0xe0); reg_w_val(gspca_dev, ET_W_H_HEIGTH, 0x60); /* 6d */ reg_w_val(gspca_dev, ET_REG6e, 0x86); reg_w_val(gspca_dev, ET_REG6f, 0x01); reg_w_val(gspca_dev, ET_REG70, 0x26); reg_w_val(gspca_dev, ET_REG71, 0x7a); reg_w_val(gspca_dev, ET_REG72, 0x01); /* Clock Pattern registers ***************** */ reg_w_val(gspca_dev, ET_REG73, 0x00); reg_w_val(gspca_dev, ET_REG74, 0x18); /* 0x28 */ reg_w_val(gspca_dev, ET_REG75, 0x0f); /* 0x01 */ /**********************************************/ reg_w_val(gspca_dev, 0x8a, 0x20); reg_w_val(gspca_dev, 0x8d, 0x0f); reg_w_val(gspca_dev, 0x8e, 0x08); /**************************************/ reg_w_val(gspca_dev, 0x03, 0x08); reg_w_val(gspca_dev, ET_PXL_CLK, 0x03); reg_w_val(gspca_dev, 0x81, 0xff); reg_w_val(gspca_dev, 0x80, 0x00); reg_w_val(gspca_dev, 0x81, 0xff); reg_w_val(gspca_dev, 0x80, 0x20); reg_w_val(gspca_dev, 0x03, 0x01); reg_w_val(gspca_dev, 0x03, 0x00); reg_w_val(gspca_dev, 0x03, 0x08); /********************************************/ /* reg_r(gspca_dev, ET_I2C_BASE, 1); always 0x40 as the pas106 ??? 
*/ /* set the sensor */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) value = 0x04; /* 320 */ else /* 640 */ value = 0x1e; /* 0x17 * setting PixelClock * 0x03 mean 24/(3+1) = 6 Mhz * 0x05 -> 24/(5+1) = 4 Mhz * 0x0b -> 24/(11+1) = 2 Mhz * 0x17 -> 24/(23+1) = 1 Mhz */ reg_w_val(gspca_dev, ET_PXL_CLK, value); /* now set by fifo the FormatLine setting */ reg_w(gspca_dev, 0x62, FormLine, 6); /* set exposure times [ 0..0x78] 0->longvalue 0x78->shortvalue */ reg_w_val(gspca_dev, 0x81, 0x47); /* 0x47; */ reg_w_val(gspca_dev, 0x80, 0x40); /* 0x40; */ /* Pedro change */ /* Brightness change Brith+ decrease value */ /* Brigth- increase value */ /* original value = 0x70; */ reg_w_val(gspca_dev, 0x81, 0x30); /* 0x20; - set brightness */ reg_w_val(gspca_dev, 0x80, 0x20); /* 0x20; */ } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { int i; for (i = 0; i < 4; i++) reg_w_val(gspca_dev, ET_O_RED + i, val); } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { __u8 RGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 }; memset(RGBG, val, sizeof(RGBG) - 2); reg_w(gspca_dev, ET_G_RED, RGBG, 6); } static void setcolors(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; __u8 I2cc[] = { 0x05, 0x02, 0x02, 0x05, 0x0d }; __u8 i2cflags = 0x01; /* __u8 green = 0; */ I2cc[3] = val; /* red */ I2cc[0] = 15 - val; /* blue */ /* green = 15 - ((((7*I2cc[0]) >> 2 ) + I2cc[3]) >> 1); */ /* I2cc[1] = I2cc[2] = green; */ if (sd->sensor == SENSOR_PAS106) { i2c_w(gspca_dev, PAS106_REG13, &i2cflags, 1, 3); i2c_w(gspca_dev, PAS106_REG9, I2cc, sizeof I2cc, 1); } /* PDEBUG(D_CONF , "Etoms red %d blue %d green %d", I2cc[3], I2cc[0], green); */ } static s32 getcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) { /* i2c_r(gspca_dev, PAS106_REG9); * blue */ i2c_r(gspca_dev, PAS106_REG9 + 3); /* red */ return gspca_dev->usb_buf[0] & 0x0f; } return 0; } static void setautogain(struct 
gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->autogain) sd->ag_cnt = AG_CNT_START; else sd->ag_cnt = -1; } static void Et_init1(struct gspca_dev *gspca_dev) { __u8 value; /* __u8 I2c0 [] = {0x0a, 0x12, 0x05, 0x22, 0xac, 0x00, 0x01, 0x00}; */ __u8 I2c0[] = { 0x0a, 0x12, 0x05, 0x6d, 0xcd, 0x00, 0x01, 0x00 }; /* try 1/120 0x6d 0xcd 0x40 */ /* __u8 I2c0 [] = {0x0a, 0x12, 0x05, 0xfe, 0xfe, 0xc0, 0x01, 0x00}; * 1/60000 hmm ?? */ PDEBUG(D_STREAM, "Open Init1 ET"); reg_w_val(gspca_dev, ET_GPIO_DIR_CTRL, 7); reg_r(gspca_dev, ET_GPIO_IN, 1); reg_w_val(gspca_dev, ET_RESET_ALL, 1); reg_w_val(gspca_dev, ET_RESET_ALL, 0); reg_w_val(gspca_dev, ET_ClCK, 0x10); reg_w_val(gspca_dev, ET_CTRL, 0x19); /* compression et subsampling */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) value = ET_COMP_VAL1; else value = ET_COMP_VAL0; PDEBUG(D_STREAM, "Open mode %d Compression %d", gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv, value); reg_w_val(gspca_dev, ET_COMP, value); reg_w_val(gspca_dev, ET_MAXQt, 0x1d); reg_w_val(gspca_dev, ET_MINQt, 0x02); /* undocumented registers */ reg_w_val(gspca_dev, ET_REG1d, 0xff); reg_w_val(gspca_dev, ET_REG1e, 0xff); reg_w_val(gspca_dev, ET_REG1f, 0xff); reg_w_val(gspca_dev, ET_REG20, 0x35); reg_w_val(gspca_dev, ET_REG21, 0x01); reg_w_val(gspca_dev, ET_REG22, 0x00); reg_w_val(gspca_dev, ET_REG23, 0xf7); reg_w_val(gspca_dev, ET_REG24, 0xff); reg_w_val(gspca_dev, ET_REG25, 0x07); /* colors setting */ reg_w_val(gspca_dev, ET_G_RED, 0x80); reg_w_val(gspca_dev, ET_G_GREEN1, 0x80); reg_w_val(gspca_dev, ET_G_BLUE, 0x80); reg_w_val(gspca_dev, ET_G_GREEN2, 0x80); reg_w_val(gspca_dev, ET_G_GR_H, 0x00); reg_w_val(gspca_dev, ET_G_GB_H, 0x00); /* Window control registers */ reg_w_val(gspca_dev, ET_SYNCHRO, 0xf0); reg_w_val(gspca_dev, ET_STARTX, 0x56); /* 0x56 */ reg_w_val(gspca_dev, ET_STARTY, 0x05); /* 0x04 */ reg_w_val(gspca_dev, ET_WIDTH_LOW, 0x60); reg_w_val(gspca_dev, ET_HEIGTH_LOW, 0x20); reg_w_val(gspca_dev, 
ET_W_H_HEIGTH, 0x50); reg_w_val(gspca_dev, ET_REG6e, 0x86); reg_w_val(gspca_dev, ET_REG6f, 0x01); reg_w_val(gspca_dev, ET_REG70, 0x86); reg_w_val(gspca_dev, ET_REG71, 0x14); reg_w_val(gspca_dev, ET_REG72, 0x00); /* Clock Pattern registers */ reg_w_val(gspca_dev, ET_REG73, 0x00); reg_w_val(gspca_dev, ET_REG74, 0x00); reg_w_val(gspca_dev, ET_REG75, 0x0a); reg_w_val(gspca_dev, ET_I2C_CLK, 0x04); reg_w_val(gspca_dev, ET_PXL_CLK, 0x01); /* set the sensor */ if (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { I2c0[0] = 0x06; i2c_w(gspca_dev, PAS106_REG2, I2c0, sizeof I2c0, 1); i2c_w(gspca_dev, PAS106_REG9, I2c2, sizeof I2c2, 1); value = 0x06; i2c_w(gspca_dev, PAS106_REG2, &value, 1, 1); i2c_w(gspca_dev, PAS106_REG3, I2c3, sizeof I2c3, 1); /* value = 0x1f; */ value = 0x04; i2c_w(gspca_dev, PAS106_REG0e, &value, 1, 1); } else { I2c0[0] = 0x0a; i2c_w(gspca_dev, PAS106_REG2, I2c0, sizeof I2c0, 1); i2c_w(gspca_dev, PAS106_REG9, I2c2, sizeof I2c2, 1); value = 0x0a; i2c_w(gspca_dev, PAS106_REG2, &value, 1, 1); i2c_w(gspca_dev, PAS106_REG3, I2c3, sizeof I2c3, 1); value = 0x04; /* value = 0x10; */ i2c_w(gspca_dev, PAS106_REG0e, &value, 1, 1); /* bit 2 enable bit 1:2 select 0 1 2 3 value = 0x07; * curve 0 * i2c_w(gspca_dev, PAS106_REG0f, &value, 1, 1); */ } /* value = 0x01; */ /* value = 0x22; */ /* i2c_w(gspca_dev, PAS106_REG5, &value, 1, 1); */ /* magnetude and sign bit for DAC */ i2c_w(gspca_dev, PAS106_REG7, I2c4, sizeof I2c4, 1); /* now set by fifo the whole colors setting */ reg_w(gspca_dev, ET_G_RED, GainRGBG, 6); setcolors(gspca_dev, getcolors(gspca_dev)); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; sd->sensor = id->driver_info; if (sd->sensor == SENSOR_PAS106) { cam->cam_mode = sif_mode; cam->nmodes = ARRAY_SIZE(sif_mode); } else { cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); } 
sd->ag_cnt = -1; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) Et_init1(gspca_dev); else Et_init2(gspca_dev); reg_w_val(gspca_dev, ET_RESET_ALL, 0x08); et_video(gspca_dev, 0); /* video off */ return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) Et_init1(gspca_dev); else Et_init2(gspca_dev); setautogain(gspca_dev); reg_w_val(gspca_dev, ET_RESET_ALL, 0x08); et_video(gspca_dev, 1); /* video on */ return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { et_video(gspca_dev, 0); /* video off */ } static __u8 Et_getgainG(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) { i2c_r(gspca_dev, PAS106_REG0e); PDEBUG(D_CONF, "Etoms gain G %d", gspca_dev->usb_buf[0]); return gspca_dev->usb_buf[0]; } return 0x1f; } static void Et_setgainG(struct gspca_dev *gspca_dev, __u8 gain) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_PAS106) { __u8 i2cflags = 0x01; i2c_w(gspca_dev, PAS106_REG13, &i2cflags, 1, 3); i2c_w(gspca_dev, PAS106_REG0e, &gain, 1, 1); } } #define BLIMIT(bright) \ (u8)((bright > 0x1f) ? 0x1f : ((bright < 4) ? 3 : bright)) #define LIMIT(color) \ (u8)((color > 0xff) ? 0xff : ((color < 0) ? 
0 : color)) static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; __u8 luma; __u8 luma_mean = 128; __u8 luma_delta = 20; __u8 spring = 4; int Gbright; __u8 r, g, b; if (sd->ag_cnt < 0) return; if (--sd->ag_cnt >= 0) return; sd->ag_cnt = AG_CNT_START; Gbright = Et_getgainG(gspca_dev); reg_r(gspca_dev, ET_LUMA_CENTER, 4); g = (gspca_dev->usb_buf[0] + gspca_dev->usb_buf[3]) >> 1; r = gspca_dev->usb_buf[1]; b = gspca_dev->usb_buf[2]; r = ((r << 8) - (r << 4) - (r << 3)) >> 10; b = ((b << 7) >> 10); g = ((g << 9) + (g << 7) + (g << 5)) >> 10; luma = LIMIT(r + g + b); PDEBUG(D_FRAM, "Etoms luma G %d", luma); if (luma < luma_mean - luma_delta || luma > luma_mean + luma_delta) { Gbright += (luma_mean - luma) >> spring; Gbright = BLIMIT(Gbright); PDEBUG(D_FRAM, "Etoms Gbright %d", Gbright); Et_setgainG(gspca_dev, (__u8) Gbright); } } #undef BLIMIT #undef LIMIT static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { int seqframe; seqframe = data[0] & 0x3f; len = (int) (((data[0] & 0xc0) << 2) | data[1]); if (seqframe == 0x3f) { PDEBUG(D_FRAM, "header packet found datalength %d !!", len); PDEBUG(D_FRAM, "G %d R %d G %d B %d", data[2], data[3], data[4], data[5]); data += 30; /* don't change datalength as the chips provided it */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); return; } if (len) { data += 8; gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } else { /* Drop Packet */ gspca_dev->last_packet_type = DISCARD_PACKET; } } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; 
case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOGAIN: sd->autogain = ctrl->val; setautogain(gspca_dev); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 1, 127, 1, 63); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 127); if (sd->sensor == SENSOR_PAS106) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 15, 1, 7); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = do_autogain, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106}, {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
isimobile/android_kernel_sony_fusion3
arch/sparc/mm/btfixup.c
4411
10193
/* btfixup.c: Boot time code fixup and relocator, so that * we can get rid of most indirect calls to achieve single * image sun4c and srmmu kernel. * * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/btfixup.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/cacheflush.h> #define BTFIXUP_OPTIMIZE_NOP #define BTFIXUP_OPTIMIZE_OTHER extern char *srmmu_name; static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for "; static char str_sun4c[] __initdata = "sun4c\n"; static char str_srmmu[] __initdata = "srmmu[%s]/"; static char str_iommu[] __initdata = "iommu\n"; static char str_iounit[] __initdata = "io-unit\n"; static int visited __initdata = 0; extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[]; extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[]; static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n"; static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n"; static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n"; static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n"; static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n"; static char wrong[] __initdata = "Wrong address for %c fixup %p\n"; static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n"; static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n"; static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n"; static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n"; static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n"; 
static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n"; static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n"; static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n"; #ifdef BTFIXUP_OPTIMIZE_OTHER static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value) { if (!fmangled) *addr = value; else { unsigned int *q = (unsigned int *)q1; if (*addr == 0x01000000) { /* Noped */ *q = value; } else if (addr[-1] == *q) { /* Moved */ addr[-1] = value; *q = value; } else { prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value); prom_halt(); } } } #else static inline void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value) { *addr = value; } #endif void __init btfixup(void) { unsigned int *p, *q; int type, count; unsigned insn; unsigned *addr; int fmangled = 0; void (*flush_cacheall)(void); if (!visited) { visited++; printk(version); if (ARCH_SUN4C) printk(str_sun4c); else { printk(str_srmmu, srmmu_name); if (sparc_cpu_model == sun4d) printk(str_iounit); else printk(str_iommu); } } for (p = ___btfixup_start; p < ___btfixup_end; ) { count = p[2]; q = p + 3; switch (type = *(unsigned char *)p) { case 'f': count = p[3]; q = p + 4; if (((p[0] & 1) || p[1]) && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) { prom_printf(wrong_f, p, p[1]); prom_halt(); } break; case 'b': if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) { prom_printf(wrong_b, p, p[1]); prom_halt(); } break; case 's': if (p[1] + 0x1000 >= 0x2000) { prom_printf(wrong_s, p, p[1]); prom_halt(); } break; case 'h': if (p[1] & 0x3ff) { prom_printf(wrong_h, p, p[1]); prom_halt(); } break; case 'a': if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) { prom_printf(wrong_a, p, p[1]); prom_halt(); } break; } if (p[0] & 1) { p[0] &= ~1; while (count) { fmangled = 0; addr = (unsigned 
*)*q; if (addr < _stext || addr >= _end) { prom_printf(wrong, type, p); prom_halt(); } insn = *addr; #ifdef BTFIXUP_OPTIMIZE_OTHER if (type != 'f' && q[1]) { insn = *(unsigned int *)q[1]; if (!insn || insn == 1) insn = *addr; else fmangled = 1; } #endif switch (type) { case 'f': /* CALL */ if (addr >= __start___ksymtab && addr < __stop___ksymtab) { *addr = p[1]; break; } else if (!q[1]) { if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */ *addr = (insn & 0xffc00000) | (p[1] >> 10); break; } else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */ *addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break; } else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */ bad_f: prom_printf(insn_f, p, addr, insn, addr[1]); prom_halt(); } } else if (q[1] != 1) addr[1] = q[1]; if (p[2] == BTFIXUPCALL_NORM) { norm_f: *addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2); q[1] = 0; break; } #ifndef BTFIXUP_OPTIMIZE_NOP goto norm_f; #else if (!(addr[1] & 0x80000000)) { if ((addr[1] & 0xc1c00000) != 0x01000000) /* !SETHI */ goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */ } else { if ((addr[1] & 0x01800000) == 0x01800000) { if ((addr[1] & 0x01f80000) == 0x01e80000) { /* RESTORE */ goto norm_f; /* It is dangerous to patch that */ } goto bad_f; } if ((addr[1] & 0xffffe003) == 0x9e03e000) { /* ADD %O7, XX, %o7 */ int displac = (addr[1] << 19); displac = (displac >> 21) + 2; *addr = (0x10800000) + (displac & 0x3fffff); q[1] = addr[1]; addr[1] = p[2]; break; } if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000) goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */ if ((addr[1] & 0x3e000000) == 0x1e000000) goto norm_f; /* rd is %o7. We'd better take care. 
*/ } if (p[2] == BTFIXUPCALL_NOP) { *addr = 0x01000000; q[1] = 1; break; } #ifndef BTFIXUP_OPTIMIZE_OTHER goto norm_f; #else if (addr[1] == 0x01000000) { /* NOP in the delay slot */ q[1] = addr[1]; *addr = p[2]; break; } if ((addr[1] & 0xc0000000) != 0xc0000000) { /* Not a memory operation */ if ((addr[1] & 0x30000000) == 0x10000000) { /* Ok, non-memory op with rd %oX */ if ((addr[1] & 0x3e000000) == 0x1c000000) goto bad_f; /* Aiee. Someone is playing strange %sp tricks */ if ((addr[1] & 0x3e000000) > 0x12000000 || ((addr[1] & 0x3e000000) == 0x12000000 && p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) || ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) { /* Nobody uses the result. We can nop it out. */ *addr = p[2]; q[1] = addr[1]; addr[1] = 0x01000000; break; } if ((addr[1] & 0xf1ffffe0) == 0x90100000) { /* MOV %reg, %Ox */ if ((addr[1] & 0x3e000000) == 0x10000000 && (p[2] & 0x7c000) == 0x20000) { /* Ok, it is call xx; mov reg, %o0 and call optimizes to doing something on %o0. Patch the patch. 
*/ *addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14); q[1] = addr[1]; addr[1] = 0x01000000; break; } if ((addr[1] & 0x3e000000) == 0x12000000 && p[2] == BTFIXUPCALL_STO1O0) { *addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25); q[1] = addr[1]; addr[1] = 0x01000000; break; } } } } *addr = addr[1]; q[1] = addr[1]; addr[1] = p[2]; break; #endif /* BTFIXUP_OPTIMIZE_OTHER */ #endif /* BTFIXUP_OPTIMIZE_NOP */ case 'b': /* BLACKBOX */ /* Has to be sethi i, xx */ if ((insn & 0xc1c00000) != 0x01000000) { prom_printf(insn_b, p, addr, insn); prom_halt(); } else { void (*do_fixup)(unsigned *); do_fixup = (void (*)(unsigned *))p[1]; do_fixup(addr); } break; case 's': /* SIMM13 */ /* Has to be or %g0, i, xx */ if ((insn & 0xc1ffe000) != 0x80102000) { prom_printf(insn_s, p, addr, insn); prom_halt(); } set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff)); break; case 'h': /* SETHI */ /* Has to be sethi i, xx */ if ((insn & 0xc1c00000) != 0x01000000) { prom_printf(insn_h, p, addr, insn); prom_halt(); } set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10)); break; case 'a': /* HALF */ /* Has to be sethi i, xx or or %g0, i, xx */ if ((insn & 0xc1c00000) != 0x01000000 && (insn & 0xc1ffe000) != 0x80102000) { prom_printf(insn_a, p, addr, insn); prom_halt(); } if (p[1] & 0x3ff) set_addr(addr, q[1], fmangled, (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff)); else set_addr(addr, q[1], fmangled, (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10)); break; case 'i': /* INT */ if ((insn & 0xc1c00000) == 0x01000000) /* %HI */ set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10)); else if ((insn & 0x80002000) == 0x80002000) /* %LO */ set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff)); else { prom_printf(insn_i, p, addr, insn); prom_halt(); } break; } count -= 2; q += 2; } } else p = q + count; } #ifdef CONFIG_SMP flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all); #else flush_cacheall = (void 
(*)(void))BTFIXUPVAL_CALL(flush_cache_all); #endif if (!flush_cacheall) { prom_printf(fca_und); prom_halt(); } (*flush_cacheall)(); }
gpl-2.0
playfulgod/android_kernel_lge_kk_zee
sound/soc/blackfin/bf5xx-ad73311.c
7227
5767
/* * File: sound/soc/blackfin/bf5xx-ad73311.c * Author: Cliff Cai <Cliff.Cai@analog.com> * * Created: Thur Sep 25 2008 * Description: Board driver for ad73311 sound chip * * Modified: * Copyright 2008 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include <asm/irq.h> #include <asm/dma.h> #include <asm/portmux.h> #include "../codecs/ad73311.h" #include "bf5xx-sport.h" #include "bf5xx-i2s-pcm.h" #if CONFIG_SND_BF5XX_SPORT_NUM == 0 #define bfin_write_SPORT_TCR1 bfin_write_SPORT0_TCR1 #define bfin_read_SPORT_TCR1 bfin_read_SPORT0_TCR1 #define bfin_write_SPORT_TCR2 bfin_write_SPORT0_TCR2 #define bfin_write_SPORT_TX16 bfin_write_SPORT0_TX16 #define bfin_read_SPORT_STAT bfin_read_SPORT0_STAT #else #define bfin_write_SPORT_TCR1 bfin_write_SPORT1_TCR1 #define bfin_read_SPORT_TCR1 bfin_read_SPORT1_TCR1 #define bfin_write_SPORT_TCR2 bfin_write_SPORT1_TCR2 #define bfin_write_SPORT_TX16 bfin_write_SPORT1_TX16 #define bfin_read_SPORT_STAT 
bfin_read_SPORT1_STAT #endif #define GPIO_SE CONFIG_SND_BFIN_AD73311_SE static struct snd_soc_card bf5xx_ad73311; static int snd_ad73311_startup(void) { pr_debug("%s enter\n", __func__); /* Pull up SE pin on AD73311L */ gpio_set_value(GPIO_SE, 1); return 0; } static int snd_ad73311_configure(void) { unsigned short ctrl_regs[6]; unsigned short status = 0; int count = 0; /* DMCLK = MCLK = 16.384 MHz * SCLK = DMCLK/8 = 2.048 MHz * Sample Rate = DMCLK/2048 = 8 KHz */ ctrl_regs[0] = AD_CONTROL | AD_WRITE | CTRL_REG_B | REGB_MCDIV(0) | \ REGB_SCDIV(0) | REGB_DIRATE(0); ctrl_regs[1] = AD_CONTROL | AD_WRITE | CTRL_REG_C | REGC_PUDEV | \ REGC_PUADC | REGC_PUDAC | REGC_PUREF | REGC_REFUSE ; ctrl_regs[2] = AD_CONTROL | AD_WRITE | CTRL_REG_D | REGD_OGS(2) | \ REGD_IGS(2); ctrl_regs[3] = AD_CONTROL | AD_WRITE | CTRL_REG_E | REGE_DA(0x1f); ctrl_regs[4] = AD_CONTROL | AD_WRITE | CTRL_REG_F | REGF_SEEN ; ctrl_regs[5] = AD_CONTROL | AD_WRITE | CTRL_REG_A | REGA_MODE_DATA; local_irq_disable(); snd_ad73311_startup(); udelay(1); bfin_write_SPORT_TCR1(TFSR); bfin_write_SPORT_TCR2(0xF); SSYNC(); /* SPORT Tx Register is a 8 x 16 FIFO, all the data can be put to * FIFO before enable SPORT to transfer the data */ for (count = 0; count < 6; count++) bfin_write_SPORT_TX16(ctrl_regs[count]); SSYNC(); bfin_write_SPORT_TCR1(bfin_read_SPORT_TCR1() | TSPEN); SSYNC(); /* When TUVF is set, the data is already send out */ while (!(status & TUVF) && ++count < 10000) { udelay(1); status = bfin_read_SPORT_STAT(); SSYNC(); } bfin_write_SPORT_TCR1(bfin_read_SPORT_TCR1() & ~TSPEN); SSYNC(); local_irq_enable(); if (count >= 10000) { printk(KERN_ERR "ad73311: failed to configure codec\n"); return -1; } return 0; } static int bf5xx_probe(struct snd_soc_card *card) { int err; if (gpio_request(GPIO_SE, "AD73311_SE")) { printk(KERN_ERR "%s: Failed ro request GPIO_%d\n", __func__, GPIO_SE); return -EBUSY; } gpio_direction_output(GPIO_SE, 0); err = snd_ad73311_configure(); if (err < 0) return -EFAULT; return 0; } 
#define BF5XX_AD7311_DAI_FMT (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | \ SND_SOC_DAIFMT_CBM_CFM) static struct snd_soc_dai_link bf5xx_ad73311_dai[] = { { .name = "ad73311", .stream_name = "AD73311", .cpu_dai_name = "bfin-i2s.0", .codec_dai_name = "ad73311-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ad73311", .dai_fmt = BF5XX_AD7311_DAI_FMT, }, { .name = "ad73311", .stream_name = "AD73311", .cpu_dai_name = "bfin-i2s.1", .codec_dai_name = "ad73311-hifi", .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ad73311", .dai_fmt = BF5XX_AD7311_DAI_FMT, }, }; static struct snd_soc_card bf5xx_ad73311 = { .name = "bfin-ad73311", .owner = THIS_MODULE, .probe = bf5xx_probe, .dai_link = &bf5xx_ad73311_dai[CONFIG_SND_BF5XX_SPORT_NUM], .num_links = 1, }; static struct platform_device *bf5xx_ad73311_snd_device; static int __init bf5xx_ad73311_init(void) { int ret; pr_debug("%s enter\n", __func__); bf5xx_ad73311_snd_device = platform_device_alloc("soc-audio", -1); if (!bf5xx_ad73311_snd_device) return -ENOMEM; platform_set_drvdata(bf5xx_ad73311_snd_device, &bf5xx_ad73311); ret = platform_device_add(bf5xx_ad73311_snd_device); if (ret) platform_device_put(bf5xx_ad73311_snd_device); return ret; } static void __exit bf5xx_ad73311_exit(void) { pr_debug("%s enter\n", __func__); platform_device_unregister(bf5xx_ad73311_snd_device); } module_init(bf5xx_ad73311_init); module_exit(bf5xx_ad73311_exit); /* Module information */ MODULE_AUTHOR("Cliff Cai"); MODULE_DESCRIPTION("ALSA SoC AD73311 Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
JPRasquin/Ubuntu12.04
net/appletalk/aarp.c
7483
25491
/* * AARP: An implementation of the AppleTalk AARP protocol for * Ethernet 'ELAP'. * * Alan Cox <Alan.Cox@linux.org> * * This doesn't fit cleanly with the IP arp. Potentially we can use * the generic neighbour discovery code to clean this up. * * FIXME: * We ought to handle the retransmits with a single list and a * separate fast timer for when it is needed. * Use neighbour discovery code. * Token Ring Support. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * References: * Inside AppleTalk (2nd Ed). * Fixes: * Jaume Grau - flush caches on AARP_PROBE * Rob Newberry - Added proxy AARP and AARP proc fs, * moved probing from DDP module. * Arnaldo C. Melo - don't mangle rx packets * */ #include <linux/if_arp.h> #include <linux/slab.h> #include <net/sock.h> #include <net/datalink.h> #include <net/psnap.h> #include <linux/atalk.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/export.h> int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME; int sysctl_aarp_tick_time = AARP_TICK_TIME; int sysctl_aarp_retransmit_limit = AARP_RETRANSMIT_LIMIT; int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME; /* Lists of aarp entries */ /** * struct aarp_entry - AARP entry * @last_sent - Last time we xmitted the aarp request * @packet_queue - Queue of frames wait for resolution * @status - Used for proxy AARP * expires_at - Entry expiry time * target_addr - DDP Address * dev - Device to use * hwaddr - Physical i/f address of target/router * xmit_count - When this hits 10 we give up * next - Next entry in chain */ struct aarp_entry { /* These first two are only used for unresolved entries */ unsigned long last_sent; struct sk_buff_head packet_queue; int status; unsigned long expires_at; struct atalk_addr target_addr; struct 
net_device *dev; char hwaddr[6]; unsigned short xmit_count; struct aarp_entry *next; }; /* Hashed list of resolved, unresolved and proxy entries */ static struct aarp_entry *resolved[AARP_HASH_SIZE]; static struct aarp_entry *unresolved[AARP_HASH_SIZE]; static struct aarp_entry *proxies[AARP_HASH_SIZE]; static int unresolved_count; /* One lock protects it all. */ static DEFINE_RWLOCK(aarp_lock); /* Used to walk the list and purge/kick entries. */ static struct timer_list aarp_timer; /* * Delete an aarp queue * * Must run under aarp_lock. */ static void __aarp_expire(struct aarp_entry *a) { skb_queue_purge(&a->packet_queue); kfree(a); } /* * Send an aarp queue entry request * * Must run under aarp_lock. */ static void __aarp_send_query(struct aarp_entry *a) { static unsigned char aarp_eth_multicast[ETH_ALEN] = { 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF }; struct net_device *dev = a->dev; struct elapaarp *eah; int len = dev->hard_header_len + sizeof(*eah) + aarp_dl->header_length; struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); struct atalk_addr *sat = atalk_find_dev_addr(dev); if (!skb) return; if (!sat) { kfree_skb(skb); return; } /* Set up the buffer */ skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb_put(skb, sizeof(*eah)); skb->protocol = htons(ETH_P_ATALK); skb->dev = dev; eah = aarp_hdr(skb); /* Set up the ARP */ eah->hw_type = htons(AARP_HW_TYPE_ETHERNET); eah->pa_type = htons(ETH_P_ATALK); eah->hw_len = ETH_ALEN; eah->pa_len = AARP_PA_ALEN; eah->function = htons(AARP_REQUEST); memcpy(eah->hw_src, dev->dev_addr, ETH_ALEN); eah->pa_src_zero = 0; eah->pa_src_net = sat->s_net; eah->pa_src_node = sat->s_node; memset(eah->hw_dst, '\0', ETH_ALEN); eah->pa_dst_zero = 0; eah->pa_dst_net = a->target_addr.s_net; eah->pa_dst_node = a->target_addr.s_node; /* Send it */ aarp_dl->request(aarp_dl, skb, aarp_eth_multicast); /* Update the sending count */ a->xmit_count++; a->last_sent = jiffies; } 
/* This runs under aarp_lock and in softint context, so only atomic memory * allocations can be used. */ static void aarp_send_reply(struct net_device *dev, struct atalk_addr *us, struct atalk_addr *them, unsigned char *sha) { struct elapaarp *eah; int len = dev->hard_header_len + sizeof(*eah) + aarp_dl->header_length; struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); if (!skb) return; /* Set up the buffer */ skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb_put(skb, sizeof(*eah)); skb->protocol = htons(ETH_P_ATALK); skb->dev = dev; eah = aarp_hdr(skb); /* Set up the ARP */ eah->hw_type = htons(AARP_HW_TYPE_ETHERNET); eah->pa_type = htons(ETH_P_ATALK); eah->hw_len = ETH_ALEN; eah->pa_len = AARP_PA_ALEN; eah->function = htons(AARP_REPLY); memcpy(eah->hw_src, dev->dev_addr, ETH_ALEN); eah->pa_src_zero = 0; eah->pa_src_net = us->s_net; eah->pa_src_node = us->s_node; if (!sha) memset(eah->hw_dst, '\0', ETH_ALEN); else memcpy(eah->hw_dst, sha, ETH_ALEN); eah->pa_dst_zero = 0; eah->pa_dst_net = them->s_net; eah->pa_dst_node = them->s_node; /* Send it */ aarp_dl->request(aarp_dl, skb, sha); } /* * Send probe frames. Called from aarp_probe_network and * aarp_proxy_probe_network. 
*/ static void aarp_send_probe(struct net_device *dev, struct atalk_addr *us) { struct elapaarp *eah; int len = dev->hard_header_len + sizeof(*eah) + aarp_dl->header_length; struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC); static unsigned char aarp_eth_multicast[ETH_ALEN] = { 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF }; if (!skb) return; /* Set up the buffer */ skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb_put(skb, sizeof(*eah)); skb->protocol = htons(ETH_P_ATALK); skb->dev = dev; eah = aarp_hdr(skb); /* Set up the ARP */ eah->hw_type = htons(AARP_HW_TYPE_ETHERNET); eah->pa_type = htons(ETH_P_ATALK); eah->hw_len = ETH_ALEN; eah->pa_len = AARP_PA_ALEN; eah->function = htons(AARP_PROBE); memcpy(eah->hw_src, dev->dev_addr, ETH_ALEN); eah->pa_src_zero = 0; eah->pa_src_net = us->s_net; eah->pa_src_node = us->s_node; memset(eah->hw_dst, '\0', ETH_ALEN); eah->pa_dst_zero = 0; eah->pa_dst_net = us->s_net; eah->pa_dst_node = us->s_node; /* Send it */ aarp_dl->request(aarp_dl, skb, aarp_eth_multicast); } /* * Handle an aarp timer expire * * Must run under the aarp_lock. */ static void __aarp_expire_timer(struct aarp_entry **n) { struct aarp_entry *t; while (*n) /* Expired ? */ if (time_after(jiffies, (*n)->expires_at)) { t = *n; *n = (*n)->next; __aarp_expire(t); } else n = &((*n)->next); } /* * Kick all pending requests 5 times a second. * * Must run under the aarp_lock. */ static void __aarp_kick(struct aarp_entry **n) { struct aarp_entry *t; while (*n) /* Expired: if this will be the 11th tx, we delete instead. */ if ((*n)->xmit_count >= sysctl_aarp_retransmit_limit) { t = *n; *n = (*n)->next; __aarp_expire(t); } else { __aarp_send_query(*n); n = &((*n)->next); } } /* * A device has gone down. Take all entries referring to the device * and remove them. * * Must run under the aarp_lock. 
*/ static void __aarp_expire_device(struct aarp_entry **n, struct net_device *dev) { struct aarp_entry *t; while (*n) if ((*n)->dev == dev) { t = *n; *n = (*n)->next; __aarp_expire(t); } else n = &((*n)->next); } /* Handle the timer event */ static void aarp_expire_timeout(unsigned long unused) { int ct; write_lock_bh(&aarp_lock); for (ct = 0; ct < AARP_HASH_SIZE; ct++) { __aarp_expire_timer(&resolved[ct]); __aarp_kick(&unresolved[ct]); __aarp_expire_timer(&unresolved[ct]); __aarp_expire_timer(&proxies[ct]); } write_unlock_bh(&aarp_lock); mod_timer(&aarp_timer, jiffies + (unresolved_count ? sysctl_aarp_tick_time : sysctl_aarp_expiry_time)); } /* Network device notifier chain handler. */ static int aarp_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; int ct; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event == NETDEV_DOWN) { write_lock_bh(&aarp_lock); for (ct = 0; ct < AARP_HASH_SIZE; ct++) { __aarp_expire_device(&resolved[ct], dev); __aarp_expire_device(&unresolved[ct], dev); __aarp_expire_device(&proxies[ct], dev); } write_unlock_bh(&aarp_lock); } return NOTIFY_DONE; } /* Expire all entries in a hash chain */ static void __aarp_expire_all(struct aarp_entry **n) { struct aarp_entry *t; while (*n) { t = *n; *n = (*n)->next; __aarp_expire(t); } } /* Cleanup all hash chains -- module unloading */ static void aarp_purge(void) { int ct; write_lock_bh(&aarp_lock); for (ct = 0; ct < AARP_HASH_SIZE; ct++) { __aarp_expire_all(&resolved[ct]); __aarp_expire_all(&unresolved[ct]); __aarp_expire_all(&proxies[ct]); } write_unlock_bh(&aarp_lock); } /* * Create a new aarp entry. This must use GFP_ATOMIC because it * runs while holding spinlocks. */ static struct aarp_entry *aarp_alloc(void) { struct aarp_entry *a = kmalloc(sizeof(*a), GFP_ATOMIC); if (a) skb_queue_head_init(&a->packet_queue); return a; } /* * Find an entry. We might return an expired but not yet purged entry. 
We * don't care as it will do no harm. * * This must run under the aarp_lock. */ static struct aarp_entry *__aarp_find_entry(struct aarp_entry *list, struct net_device *dev, struct atalk_addr *sat) { while (list) { if (list->target_addr.s_net == sat->s_net && list->target_addr.s_node == sat->s_node && list->dev == dev) break; list = list->next; } return list; } /* Called from the DDP code, and thus must be exported. */ void aarp_proxy_remove(struct net_device *dev, struct atalk_addr *sa) { int hash = sa->s_node % (AARP_HASH_SIZE - 1); struct aarp_entry *a; write_lock_bh(&aarp_lock); a = __aarp_find_entry(proxies[hash], dev, sa); if (a) a->expires_at = jiffies - 1; write_unlock_bh(&aarp_lock); } /* This must run under aarp_lock. */ static struct atalk_addr *__aarp_proxy_find(struct net_device *dev, struct atalk_addr *sa) { int hash = sa->s_node % (AARP_HASH_SIZE - 1); struct aarp_entry *a = __aarp_find_entry(proxies[hash], dev, sa); return a ? sa : NULL; } /* * Probe a Phase 1 device or a device that requires its Net:Node to * be set via an ioctl. */ static void aarp_send_probe_phase1(struct atalk_iface *iface) { struct ifreq atreq; struct sockaddr_at *sa = (struct sockaddr_at *)&atreq.ifr_addr; const struct net_device_ops *ops = iface->dev->netdev_ops; sa->sat_addr.s_node = iface->address.s_node; sa->sat_addr.s_net = ntohs(iface->address.s_net); /* We pass the Net:Node to the drivers/cards by a Device ioctl. 
*/ if (!(ops->ndo_do_ioctl(iface->dev, &atreq, SIOCSIFADDR))) { ops->ndo_do_ioctl(iface->dev, &atreq, SIOCGIFADDR); if (iface->address.s_net != htons(sa->sat_addr.s_net) || iface->address.s_node != sa->sat_addr.s_node) iface->status |= ATIF_PROBE_FAIL; iface->address.s_net = htons(sa->sat_addr.s_net); iface->address.s_node = sa->sat_addr.s_node; } } void aarp_probe_network(struct atalk_iface *atif) { if (atif->dev->type == ARPHRD_LOCALTLK || atif->dev->type == ARPHRD_PPP) aarp_send_probe_phase1(atif); else { unsigned int count; for (count = 0; count < AARP_RETRANSMIT_LIMIT; count++) { aarp_send_probe(atif->dev, &atif->address); /* Defer 1/10th */ msleep(100); if (atif->status & ATIF_PROBE_FAIL) break; } } } int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa) { int hash, retval = -EPROTONOSUPPORT; struct aarp_entry *entry; unsigned int count; /* * we don't currently support LocalTalk or PPP for proxy AARP; * if someone wants to try and add it, have fun */ if (atif->dev->type == ARPHRD_LOCALTLK || atif->dev->type == ARPHRD_PPP) goto out; /* * create a new AARP entry with the flags set to be published -- * we need this one to hang around even if it's in use */ entry = aarp_alloc(); retval = -ENOMEM; if (!entry) goto out; entry->expires_at = -1; entry->status = ATIF_PROBE; entry->target_addr.s_node = sa->s_node; entry->target_addr.s_net = sa->s_net; entry->dev = atif->dev; write_lock_bh(&aarp_lock); hash = sa->s_node % (AARP_HASH_SIZE - 1); entry->next = proxies[hash]; proxies[hash] = entry; for (count = 0; count < AARP_RETRANSMIT_LIMIT; count++) { aarp_send_probe(atif->dev, sa); /* Defer 1/10th */ write_unlock_bh(&aarp_lock); msleep(100); write_lock_bh(&aarp_lock); if (entry->status & ATIF_PROBE_FAIL) break; } if (entry->status & ATIF_PROBE_FAIL) { entry->expires_at = jiffies - 1; /* free the entry */ retval = -EADDRINUSE; /* return network full */ } else { /* clear the probing flag */ entry->status &= ~ATIF_PROBE; retval = 1; } 
write_unlock_bh(&aarp_lock); out: return retval; } /* Send a DDP frame */ int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb, struct atalk_addr *sa, void *hwaddr) { static char ddp_eth_multicast[ETH_ALEN] = { 0x09, 0x00, 0x07, 0xFF, 0xFF, 0xFF }; int hash; struct aarp_entry *a; skb_reset_network_header(skb); /* Check for LocalTalk first */ if (dev->type == ARPHRD_LOCALTLK) { struct atalk_addr *at = atalk_find_dev_addr(dev); struct ddpehdr *ddp = (struct ddpehdr *)skb->data; int ft = 2; /* * Compressible ? * * IFF: src_net == dest_net == device_net * (zero matches anything) */ if ((!ddp->deh_snet || at->s_net == ddp->deh_snet) && (!ddp->deh_dnet || at->s_net == ddp->deh_dnet)) { skb_pull(skb, sizeof(*ddp) - 4); /* * The upper two remaining bytes are the port * numbers we just happen to need. Now put the * length in the lower two. */ *((__be16 *)skb->data) = htons(skb->len); ft = 1; } /* * Nice and easy. No AARP type protocols occur here so we can * just shovel it out with a 3 byte LLAP header */ skb_push(skb, 3); skb->data[0] = sa->s_node; skb->data[1] = at->s_node; skb->data[2] = ft; skb->dev = dev; goto sendit; } /* On a PPP link we neither compress nor aarp. */ if (dev->type == ARPHRD_PPP) { skb->protocol = htons(ETH_P_PPPTALK); skb->dev = dev; goto sendit; } /* Non ELAP we cannot do. */ if (dev->type != ARPHRD_ETHER) goto free_it; skb->dev = dev; skb->protocol = htons(ETH_P_ATALK); hash = sa->s_node % (AARP_HASH_SIZE - 1); /* Do we have a resolved entry? 
*/ if (sa->s_node == ATADDR_BCAST) { /* Send it */ ddp_dl->request(ddp_dl, skb, ddp_eth_multicast); goto sent; } write_lock_bh(&aarp_lock); a = __aarp_find_entry(resolved[hash], dev, sa); if (a) { /* Return 1 and fill in the address */ a->expires_at = jiffies + (sysctl_aarp_expiry_time * 10); ddp_dl->request(ddp_dl, skb, a->hwaddr); write_unlock_bh(&aarp_lock); goto sent; } /* Do we have an unresolved entry: This is the less common path */ a = __aarp_find_entry(unresolved[hash], dev, sa); if (a) { /* Queue onto the unresolved queue */ skb_queue_tail(&a->packet_queue, skb); goto out_unlock; } /* Allocate a new entry */ a = aarp_alloc(); if (!a) { /* Whoops slipped... good job it's an unreliable protocol 8) */ write_unlock_bh(&aarp_lock); goto free_it; } /* Set up the queue */ skb_queue_tail(&a->packet_queue, skb); a->expires_at = jiffies + sysctl_aarp_resolve_time; a->dev = dev; a->next = unresolved[hash]; a->target_addr = *sa; a->xmit_count = 0; unresolved[hash] = a; unresolved_count++; /* Send an initial request for the address */ __aarp_send_query(a); /* * Switch to fast timer if needed (That is if this is the first * unresolved entry to get added) */ if (unresolved_count == 1) mod_timer(&aarp_timer, jiffies + sysctl_aarp_tick_time); /* Now finally, it is safe to drop the lock. */ out_unlock: write_unlock_bh(&aarp_lock); /* Tell the ddp layer we have taken over for this frame. */ goto sent; sendit: if (skb->sk) skb->priority = skb->sk->sk_priority; if (dev_queue_xmit(skb)) goto drop; sent: return NET_XMIT_SUCCESS; free_it: kfree_skb(skb); drop: return NET_XMIT_DROP; } EXPORT_SYMBOL(aarp_send_ddp); /* * An entry in the aarp unresolved queue has become resolved. Send * all the frames queued under it. * * Must run under aarp_lock. 
*/ static void __aarp_resolved(struct aarp_entry **list, struct aarp_entry *a, int hash) { struct sk_buff *skb; while (*list) if (*list == a) { unresolved_count--; *list = a->next; /* Move into the resolved list */ a->next = resolved[hash]; resolved[hash] = a; /* Kick frames off */ while ((skb = skb_dequeue(&a->packet_queue)) != NULL) { a->expires_at = jiffies + sysctl_aarp_expiry_time * 10; ddp_dl->request(ddp_dl, skb, a->hwaddr); } } else list = &((*list)->next); } /* * This is called by the SNAP driver whenever we see an AARP SNAP * frame. We currently only support Ethernet. */ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct elapaarp *ea = aarp_hdr(skb); int hash, ret = 0; __u16 function; struct aarp_entry *a; struct atalk_addr sa, *ma, da; struct atalk_iface *ifa; if (!net_eq(dev_net(dev), &init_net)) goto out0; /* We only do Ethernet SNAP AARP. */ if (dev->type != ARPHRD_ETHER) goto out0; /* Frame size ok? */ if (!skb_pull(skb, sizeof(*ea))) goto out0; function = ntohs(ea->function); /* Sanity check fields. */ if (function < AARP_REQUEST || function > AARP_PROBE || ea->hw_len != ETH_ALEN || ea->pa_len != AARP_PA_ALEN || ea->pa_src_zero || ea->pa_dst_zero) goto out0; /* Looks good. */ hash = ea->pa_src_node % (AARP_HASH_SIZE - 1); /* Build an address. */ sa.s_node = ea->pa_src_node; sa.s_net = ea->pa_src_net; /* Process the packet. Check for replies of me. 
*/ ifa = atalk_find_dev(dev); if (!ifa) goto out1; if (ifa->status & ATIF_PROBE && ifa->address.s_node == ea->pa_dst_node && ifa->address.s_net == ea->pa_dst_net) { ifa->status |= ATIF_PROBE_FAIL; /* Fail the probe (in use) */ goto out1; } /* Check for replies of proxy AARP entries */ da.s_node = ea->pa_dst_node; da.s_net = ea->pa_dst_net; write_lock_bh(&aarp_lock); a = __aarp_find_entry(proxies[hash], dev, &da); if (a && a->status & ATIF_PROBE) { a->status |= ATIF_PROBE_FAIL; /* * we do not respond to probe or request packets for * this address while we are probing this address */ goto unlock; } switch (function) { case AARP_REPLY: if (!unresolved_count) /* Speed up */ break; /* Find the entry. */ a = __aarp_find_entry(unresolved[hash], dev, &sa); if (!a || dev != a->dev) break; /* We can fill one in - this is good. */ memcpy(a->hwaddr, ea->hw_src, ETH_ALEN); __aarp_resolved(&unresolved[hash], a, hash); if (!unresolved_count) mod_timer(&aarp_timer, jiffies + sysctl_aarp_expiry_time); break; case AARP_REQUEST: case AARP_PROBE: /* * If it is my address set ma to my address and reply. * We can treat probe and request the same. Probe * simply means we shouldn't cache the querying host, * as in a probe they are proposing an address not * using one. * * Support for proxy-AARP added. We check if the * address is one of our proxies before we toss the * packet out. */ sa.s_node = ea->pa_dst_node; sa.s_net = ea->pa_dst_net; /* See if we have a matching proxy. */ ma = __aarp_proxy_find(dev, &sa); if (!ma) ma = &ifa->address; else { /* We need to make a copy of the entry. */ da.s_node = sa.s_node; da.s_net = sa.s_net; ma = &da; } if (function == AARP_PROBE) { /* * A probe implies someone trying to get an * address. So as a precaution flush any * entries we have for this address. 
*/ a = __aarp_find_entry(resolved[sa.s_node % (AARP_HASH_SIZE - 1)], skb->dev, &sa); /* * Make it expire next tick - that avoids us * getting into a probe/flush/learn/probe/ * flush/learn cycle during probing of a slow * to respond host addr. */ if (a) { a->expires_at = jiffies - 1; mod_timer(&aarp_timer, jiffies + sysctl_aarp_tick_time); } } if (sa.s_node != ma->s_node) break; if (sa.s_net && ma->s_net && sa.s_net != ma->s_net) break; sa.s_node = ea->pa_src_node; sa.s_net = ea->pa_src_net; /* aarp_my_address has found the address to use for us. */ aarp_send_reply(dev, ma, &sa, ea->hw_src); break; } unlock: write_unlock_bh(&aarp_lock); out1: ret = 1; out0: kfree_skb(skb); return ret; } static struct notifier_block aarp_notifier = { .notifier_call = aarp_device_event, }; static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 }; void __init aarp_proto_init(void) { aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv); if (!aarp_dl) printk(KERN_CRIT "Unable to register AARP with SNAP.\n"); setup_timer(&aarp_timer, aarp_expire_timeout, 0); aarp_timer.expires = jiffies + sysctl_aarp_expiry_time; add_timer(&aarp_timer); register_netdevice_notifier(&aarp_notifier); } /* Remove the AARP entries associated with a device. */ void aarp_device_down(struct net_device *dev) { int ct; write_lock_bh(&aarp_lock); for (ct = 0; ct < AARP_HASH_SIZE; ct++) { __aarp_expire_device(&resolved[ct], dev); __aarp_expire_device(&unresolved[ct], dev); __aarp_expire_device(&proxies[ct], dev); } write_unlock_bh(&aarp_lock); } #ifdef CONFIG_PROC_FS struct aarp_iter_state { int bucket; struct aarp_entry **table; }; /* * Get the aarp entry that is in the chain described * by the iterator. * If pos is set then skip till that index. 
* pos = 1 is the first entry */ static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos) { int ct = iter->bucket; struct aarp_entry **table = iter->table; loff_t off = 0; struct aarp_entry *entry; rescan: while(ct < AARP_HASH_SIZE) { for (entry = table[ct]; entry; entry = entry->next) { if (!pos || ++off == *pos) { iter->table = table; iter->bucket = ct; return entry; } } ++ct; } if (table == resolved) { ct = 0; table = unresolved; goto rescan; } if (table == unresolved) { ct = 0; table = proxies; goto rescan; } return NULL; } static void *aarp_seq_start(struct seq_file *seq, loff_t *pos) __acquires(aarp_lock) { struct aarp_iter_state *iter = seq->private; read_lock_bh(&aarp_lock); iter->table = resolved; iter->bucket = 0; return *pos ? iter_next(iter, pos) : SEQ_START_TOKEN; } static void *aarp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct aarp_entry *entry = v; struct aarp_iter_state *iter = seq->private; ++*pos; /* first line after header */ if (v == SEQ_START_TOKEN) entry = iter_next(iter, NULL); /* next entry in current bucket */ else if (entry->next) entry = entry->next; /* next bucket or table */ else { ++iter->bucket; entry = iter_next(iter, NULL); } return entry; } static void aarp_seq_stop(struct seq_file *seq, void *v) __releases(aarp_lock) { read_unlock_bh(&aarp_lock); } static const char *dt2str(unsigned long ticks) { static char buf[32]; sprintf(buf, "%ld.%02ld", ticks / HZ, ((ticks % HZ) * 100 ) / HZ); return buf; } static int aarp_seq_show(struct seq_file *seq, void *v) { struct aarp_iter_state *iter = seq->private; struct aarp_entry *entry = v; unsigned long now = jiffies; if (v == SEQ_START_TOKEN) seq_puts(seq, "Address Interface Hardware Address" " Expires LastSend Retry Status\n"); else { seq_printf(seq, "%04X:%02X %-12s", ntohs(entry->target_addr.s_net), (unsigned int) entry->target_addr.s_node, entry->dev ? 
entry->dev->name : "????"); seq_printf(seq, "%pM", entry->hwaddr); seq_printf(seq, " %8s", dt2str((long)entry->expires_at - (long)now)); if (iter->table == unresolved) seq_printf(seq, " %8s %6hu", dt2str(now - entry->last_sent), entry->xmit_count); else seq_puts(seq, " "); seq_printf(seq, " %s\n", (iter->table == resolved) ? "resolved" : (iter->table == unresolved) ? "unresolved" : (iter->table == proxies) ? "proxies" : "unknown"); } return 0; } static const struct seq_operations aarp_seq_ops = { .start = aarp_seq_start, .next = aarp_seq_next, .stop = aarp_seq_stop, .show = aarp_seq_show, }; static int aarp_seq_open(struct inode *inode, struct file *file) { return seq_open_private(file, &aarp_seq_ops, sizeof(struct aarp_iter_state)); } const struct file_operations atalk_seq_arp_fops = { .owner = THIS_MODULE, .open = aarp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif /* General module cleanup. Called from cleanup_module() in ddp.c. */ void aarp_cleanup_module(void) { del_timer_sync(&aarp_timer); unregister_netdevice_notifier(&aarp_notifier); unregister_snap_client(aarp_dl); aarp_purge(); }
gpl-2.0
rooque/android_kernel_xiaomi_cancro
arch/tile/lib/checksum.c
7739
2331
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * Support code for the main lib/checksum.c. */ #include <net/checksum.h> #include <linux/module.h> static inline unsigned int longto16(unsigned long x) { unsigned long ret; #ifdef __tilegx__ ret = __insn_v2sadu(x, 0); ret = __insn_v2sadu(ret, 0); #else ret = __insn_sadh_u(x, 0); ret = __insn_sadh_u(ret, 0); #endif return ret; } __wsum do_csum(const unsigned char *buff, int len) { int odd, count; unsigned long result = 0; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { result = (*buff << 8); len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(const unsigned short *)buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. */ if (count) { #ifdef __tilegx__ if (4 & (unsigned long) buff) { unsigned int w = *(const unsigned int *)buff; result = __insn_v2sadau(result, w, 0); count--; len -= 4; buff += 4; } count >>= 1; /* nr of 64-bit words.. */ #endif /* * This algorithm could wrap around for very * large buffers, but those should be impossible. 
*/ BUG_ON(count >= 65530); while (count) { unsigned long w = *(const unsigned long *)buff; count--; buff += sizeof(w); #ifdef __tilegx__ result = __insn_v2sadau(result, w, 0); #else result = __insn_sadah_u(result, w, 0); #endif } #ifdef __tilegx__ if (len & 4) { unsigned int w = *(const unsigned int *)buff; result = __insn_v2sadau(result, w, 0); buff += 4; } #endif } if (len & 2) { result += *(const unsigned short *) buff; buff += 2; } } if (len & 1) result += *buff; result = longto16(result); if (odd) result = swab16(result); out: return result; }
gpl-2.0
scanno/android_kernel_motorola_msm8992
drivers/infiniband/hw/qib/qib_cq.c
10555
12207
/* * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "qib_verbs.h" /** * qib_cq_enter - add a new entry to the completion queue * @cq: completion queue * @entry: work completion entry to add * @sig: true if @entry is a solicitated entry * * This may be called with qp->s_lock held. */ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) { struct qib_cq_wc *wc; unsigned long flags; u32 head; u32 next; spin_lock_irqsave(&cq->lock, flags); /* * Note that the head pointer might be writable by user processes. 
* Take care to verify it is a sane value. */ wc = cq->queue; head = wc->head; if (head >= (unsigned) cq->ibcq.cqe) { head = cq->ibcq.cqe; next = 0; } else next = head + 1; if (unlikely(next == wc->tail)) { spin_unlock_irqrestore(&cq->lock, flags); if (cq->ibcq.event_handler) { struct ib_event ev; ev.device = cq->ibcq.device; ev.element.cq = &cq->ibcq; ev.event = IB_EVENT_CQ_ERR; cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); } return; } if (cq->ip) { wc->uqueue[head].wr_id = entry->wr_id; wc->uqueue[head].status = entry->status; wc->uqueue[head].opcode = entry->opcode; wc->uqueue[head].vendor_err = entry->vendor_err; wc->uqueue[head].byte_len = entry->byte_len; wc->uqueue[head].ex.imm_data = (__u32 __force)entry->ex.imm_data; wc->uqueue[head].qp_num = entry->qp->qp_num; wc->uqueue[head].src_qp = entry->src_qp; wc->uqueue[head].wc_flags = entry->wc_flags; wc->uqueue[head].pkey_index = entry->pkey_index; wc->uqueue[head].slid = entry->slid; wc->uqueue[head].sl = entry->sl; wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; wc->uqueue[head].port_num = entry->port_num; /* Make sure entry is written before the head index. */ smp_wmb(); } else wc->kqueue[head] = *entry; wc->head = next; if (cq->notify == IB_CQ_NEXT_COMP || (cq->notify == IB_CQ_SOLICITED && (solicited || entry->status != IB_WC_SUCCESS))) { cq->notify = IB_CQ_NONE; cq->triggered++; /* * This will cause send_complete() to be called in * another thread. */ queue_work(qib_cq_wq, &cq->comptask); } spin_unlock_irqrestore(&cq->lock, flags); } /** * qib_poll_cq - poll for work completion entries * @ibcq: the completion queue to poll * @num_entries: the maximum number of entries to return * @entry: pointer to array where work completions are placed * * Returns the number of completion entries polled. * * This may be called from interrupt context. Also called by ib_poll_cq() * in the generic verbs code. 
*/ int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) { struct qib_cq *cq = to_icq(ibcq); struct qib_cq_wc *wc; unsigned long flags; int npolled; u32 tail; /* The kernel can only poll a kernel completion queue */ if (cq->ip) { npolled = -EINVAL; goto bail; } spin_lock_irqsave(&cq->lock, flags); wc = cq->queue; tail = wc->tail; if (tail > (u32) cq->ibcq.cqe) tail = (u32) cq->ibcq.cqe; for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { if (tail == wc->head) break; /* The kernel doesn't need a RMB since it has the lock. */ *entry = wc->kqueue[tail]; if (tail >= cq->ibcq.cqe) tail = 0; else tail++; } wc->tail = tail; spin_unlock_irqrestore(&cq->lock, flags); bail: return npolled; } static void send_complete(struct work_struct *work) { struct qib_cq *cq = container_of(work, struct qib_cq, comptask); /* * The completion handler will most likely rearm the notification * and poll for all pending entries. If a new completion entry * is added while we are in this routine, queue_work() * won't call us again until we return so we check triggered to * see if we need to call the handler again. */ for (;;) { u8 triggered = cq->triggered; /* * IPoIB connected mode assumes the callback is from a * soft IRQ. We simulate this by blocking "bottom halves". * See the implementation for ipoib_cm_handle_tx_wc(), * netif_tx_lock_bh() and netif_tx_lock(). */ local_bh_disable(); cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); local_bh_enable(); if (cq->triggered == triggered) return; } } /** * qib_create_cq - create a completion queue * @ibdev: the device this completion queue is attached to * @entries: the minimum size of the completion queue * @context: unused by the QLogic_IB driver * @udata: user data for libibverbs.so * * Returns a pointer to the completion queue or negative errno values * for failure. * * Called by ib_create_cq() in the generic verbs code. 
 */
struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_cq *cq;
	struct qib_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	/* Reject out-of-range CQ sizes up front. */
	if (entries < 1 || entries > ib_qib_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	/* A udata large enough for an mmap offset marks a userspace CQ. */
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = qib_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	/* Enforce the per-device CQ limit under the counter lock. */
	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_qib_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}
	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	/* Userspace CQs are published on the pending-mmap list. */
	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}

/**
 * qib_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int qib_destroy_cq(struct ib_cq *ibcq)
{
	struct qib_ibdev *dev = to_idev(ibcq->device);
	struct qib_cq *cq = to_icq(ibcq);

	/* Make sure a pending send_complete() is not still running. */
	flush_work(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	/*
	 * For user-mapped CQs the WC buffer is owned by the mmap info and
	 * freed when its last reference drops; kernel CQs free it directly.
	 */
	if (cq->ip)
		kref_put(&cq->ip->ref, qib_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * qib_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct qib_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	/* Report that entries are already pending, if the caller asked. */
	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * qib_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 *
 * Returns 0 for success.
 */
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *old_wc;
	struct qib_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_qib_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	/* A udata large enough for an mmap offset marks a userspace CQ. */
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	/* Count the occupied entries, accounting for ring wrap-around. */
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	/* The new ring must be able to hold everything currently queued. */
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	/* Copy pending entries into the new ring, compacted from index 0. */
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct qib_ibdev *dev = to_idev(ibcq->device);
		struct qib_mmap_info *ip = cq->ip;

		/* Point the existing mmap info at the new, larger buffer. */
		qib_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See qib_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}
gpl-2.0
tamlok/linux
drivers/net/ethernet/marvell/mvpp2.c
316
181865
/* * Driver for Marvell PPv2 network controller for Armada 375 SoC. * * Copyright (C) 2014 Marvell * * Marcin Wojtas <mw@semihalf.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/mbus.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/cpumask.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_address.h> #include <linux/phy.h> #include <linux/clk.h> #include <uapi/linux/ppp_defs.h> #include <net/ip.h> #include <net/ipv6.h> /* RX Fifo Registers */ #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 #define MVPP2_RX_FIFO_INIT_REG 0x64 /* RX DMA Top Registers */ #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) #define MVPP2_POOL_BUF_SIZE_OFFSET 5 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) #define MVPP2_RXQ_POOL_SHORT_OFFS 20 #define MVPP2_RXQ_POOL_SHORT_MASK 0x700000 #define MVPP2_RXQ_POOL_LONG_OFFS 24 #define MVPP2_RXQ_POOL_LONG_MASK 0x7000000 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 #define MVPP2_RXQ_DISABLE_MASK BIT(31) /* Parser Registers */ #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 #define MVPP2_PRS_PORT_LU_MAX 0xf #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << 
((port) * 4)) #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) #define MVPP2_PRS_TCAM_IDX_REG 0x1100 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) #define MVPP2_PRS_TCAM_INV_MASK BIT(31) #define MVPP2_PRS_SRAM_IDX_REG 0x1200 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) /* Classifier Registers */ #define MVPP2_CLS_MODE_REG 0x1800 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) #define MVPP2_CLS_PORT_WAY_REG 0x1810 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) #define MVPP2_CLS_LKP_INDEX_REG 0x1814 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 #define MVPP2_CLS_LKP_TBL_REG 0x1818 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) #define MVPP2_CLS_FLOW_INDEX_REG 0x1820 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) /* Descriptor Manager Top Registers */ #define MVPP2_RXQ_NUM_REG 0x2040 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 #define MVPP2_RXQ_NUM_NEW_OFFSET 16 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) #define 
MVPP2_RXQ_OCCUPIED_MASK 0x3fff #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 #define MVPP2_RXQ_THRESH_REG 0x204c #define MVPP2_OCCUPIED_THRESH_OFFSET 0 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff #define MVPP2_RXQ_INDEX_REG 0x2050 #define MVPP2_TXQ_NUM_REG 0x2080 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 #define MVPP2_TXQ_THRESH_REG 0x2094 #define MVPP2_TRANSMITTED_THRESH_OFFSET 16 #define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000 #define MVPP2_TXQ_INDEX_REG 0x2098 #define MVPP2_TXQ_PREF_BUF_REG 0x209c #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) #define MVPP2_TXQ_PENDING_REG 0x20a0 #define MVPP2_TXQ_PENDING_MASK 0x3fff #define MVPP2_TXQ_INT_STATUS_REG 0x20a4 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) #define MVPP2_TRANSMITTED_COUNT_OFFSET 16 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) /* MBUS bridge registers */ #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) #define MVPP2_BASE_ADDR_ENABLE 0x4060 /* Interrupt Cause and Mask registers */ #define 
MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 /* Buffer Manager registers */ #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) #define MVPP2_BM_POOL_SIZE_MASK 0xfff0 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) #define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) #define MVPP2_BM_START_MASK BIT(0) #define MVPP2_BM_STOP_MASK BIT(1) #define MVPP2_BM_STATE_MASK BIT(4) #define MVPP2_BM_LOW_THRESH_OFFS 8 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00 #define 
MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ MVPP2_BM_LOW_THRESH_OFFS) #define MVPP2_BM_HIGH_THRESH_OFFS 16 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ MVPP2_BM_HIGH_THRESH_OFFS) #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) #define MVPP2_BM_BPPE_FULL_MASK BIT(3) #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) #define MVPP2_BM_VIRT_ALLOC_REG 0x6440 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) #define MVPP2_BM_VIRT_RLS_REG 0x64c0 #define MVPP2_BM_MC_RLS_REG 0x64c4 #define MVPP2_BM_MC_ID_MASK 0xfff #define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) /* TX Scheduler registers */ #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff #define MVPP2_TXP_SCHED_DISQ_OFFSET 8 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 #define MVPP2_TXP_SCHED_MTU_REG 0x801c #define MVPP2_TXP_MTU_MAX 0x7FFFF #define MVPP2_TXP_SCHED_REFILL_REG 0x8020 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) #define MVPP2_TXQ_TOKEN_SIZE_MAX 
0x7fffffff #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff /* TX general registers */ #define MVPP2_TX_SNOOP_REG 0x8800 #define MVPP2_TX_PORT_FLUSH_REG 0x8810 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) /* LMS registers */ #define MVPP2_SRC_ADDR_MIDDLE 0x24 #define MVPP2_SRC_ADDR_HIGH 0x28 #define MVPP2_PHY_AN_CFG0_REG 0x34 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) #define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \ 0x400 + (port) * 0x400) #define MVPP2_MIB_LATE_COLLISION 0x7c #define MVPP2_ISR_SUM_MASK_REG 0x220c #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 /* Per-port registers */ #define MVPP2_GMAC_CTRL_0_REG 0x0 #define MVPP2_GMAC_PORT_EN_MASK BIT(0) #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) #define MVPP2_GMAC_CTRL_1_REG 0x4 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) #define MVPP2_GMAC_PCS_LB_EN_BIT 6 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) #define MVPP2_GMAC_SA_LOW_OFFS 7 #define MVPP2_GMAC_CTRL_2_REG 0x8 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) #define MVPP2_GMAC_AUTONEG_CONFIG 0xc #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVPP2_GMAC_AN_SPEED_EN BIT(7) #define MVPP2_GMAC_FC_ADV_EN BIT(9) #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff 
/* Descriptor ring Macros */ #define MVPP2_QUEUE_NEXT_DESC(q, index) \ (((index) < (q)->last_desc) ? ((index) + 1) : 0) /* Various constants */ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 /* The two bytes Marvell header. Either contains a special value used * by Marvell switches when a specific hardware mode is enabled (not * supported by this driver) or is filled automatically by zeroes on * the RX side. Those two bytes being at the front of the Ethernet * header, they allow to have the IP header aligned on a 4 bytes * boundary automatically: the hardware skips those two bytes on its * own. */ #define MVPP2_MH_SIZE 2 #define MVPP2_ETH_TYPE_LEN 2 #define MVPP2_PPPOE_HDR_SIZE 8 #define MVPP2_VLAN_TAG_LEN 4 /* Lbtd 802.3 type */ #define MVPP2_IP_LBDT_TYPE 0xfffa #define MVPP2_CPU_D_CACHE_LINE_SIZE 32 #define MVPP2_TX_CSUM_MAX_SIZE 9800 /* Timeout constants */ #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 #define MVPP2_TX_MTU_MAX 0x7ffff /* Maximum number of T-CONTs of PON port */ #define MVPP2_MAX_TCONT 16 /* Maximum number of supported ports */ #define MVPP2_MAX_PORTS 4 /* Maximum number of TXQs used by single port */ #define MVPP2_MAX_TXQ 8 /* Maximum number of RXQs used by single port */ #define MVPP2_MAX_RXQ 8 /* Dfault number of RXQs in use */ #define MVPP2_DEFAULT_RXQ 4 /* Total number of RXQs available to all ports */ #define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ) /* Max number of Rx descriptors */ #define MVPP2_MAX_RXD 128 /* Max number of Tx descriptors */ #define MVPP2_MAX_TXD 1024 /* Amount of Tx descriptors that can be reserved at once by CPU */ #define MVPP2_CPU_DESC_CHUNK 64 /* Max number of Tx descriptors in each aggregated queue */ #define MVPP2_AGGR_TXQ_SIZE 256 /* Descriptor aligned size */ #define MVPP2_DESC_ALIGNED_SIZE 32 /* Descriptor alignment mask */ #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) /* RX 
FIFO constants */ #define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 /* RX buffer constants */ #define MVPP2_SKB_SHINFO_SIZE \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) #define MVPP2_RX_PKT_SIZE(mtu) \ ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) #define MVPP2_RX_MAX_PKT_SIZE(total_size) \ ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) /* IPv6 max L3 address size */ #define MVPP2_MAX_L3_ADDR_SIZE 16 /* Port flags */ #define MVPP2_F_LOOPBACK BIT(0) /* Marvell tag types */ enum mvpp2_tag_type { MVPP2_TAG_TYPE_NONE = 0, MVPP2_TAG_TYPE_MH = 1, MVPP2_TAG_TYPE_DSA = 2, MVPP2_TAG_TYPE_EDSA = 3, MVPP2_TAG_TYPE_VLAN = 4, MVPP2_TAG_TYPE_LAST = 5 }; /* Parser constants */ #define MVPP2_PRS_TCAM_SRAM_SIZE 256 #define MVPP2_PRS_TCAM_WORDS 6 #define MVPP2_PRS_SRAM_WORDS 4 #define MVPP2_PRS_FLOW_ID_SIZE 64 #define MVPP2_PRS_FLOW_ID_MASK 0x3f #define MVPP2_PRS_TCAM_ENTRY_INVALID 1 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) #define MVPP2_PRS_IPV4_HEAD 0x40 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 #define MVPP2_PRS_IPV4_MC 0xe0 #define MVPP2_PRS_IPV4_MC_MASK 0xf0 #define MVPP2_PRS_IPV4_BC_MASK 0xff #define MVPP2_PRS_IPV4_IHL 0x5 #define MVPP2_PRS_IPV4_IHL_MASK 0xf #define MVPP2_PRS_IPV6_MC 0xff #define MVPP2_PRS_IPV6_MC_MASK 0xff #define MVPP2_PRS_IPV6_HOP_MASK 0xff #define MVPP2_PRS_TCAM_PROTO_MASK 0xff #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f #define MVPP2_PRS_DBL_VLANS_MAX 100 /* Tcam structure: * - lookup ID - 4 bits * - port ID - 1 byte * - additional information - 1 byte * - header data - 8 bytes * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
*/ #define MVPP2_PRS_AI_BITS 8 #define MVPP2_PRS_PORT_MASK 0xff #define MVPP2_PRS_LU_MASK 0xf #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ (((offs) * 2) - ((offs) % 2) + 2) #define MVPP2_PRS_TCAM_AI_BYTE 16 #define MVPP2_PRS_TCAM_PORT_BYTE 17 #define MVPP2_PRS_TCAM_LU_BYTE 20 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) #define MVPP2_PRS_TCAM_INV_WORD 5 /* Tcam entries ID */ #define MVPP2_PE_DROP_ALL 0 #define MVPP2_PE_FIRST_FREE_TID 1 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) #define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) #define 
MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) /* Sram structure * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). */ #define MVPP2_PRS_SRAM_RI_OFFS 0 #define MVPP2_PRS_SRAM_RI_WORD 0 #define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 #define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 #define MVPP2_PRS_SRAM_UDF_OFFS 73 #define MVPP2_PRS_SRAM_UDF_BITS 8 #define MVPP2_PRS_SRAM_UDF_MASK 0xff #define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 #define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 #define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 #define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 #define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 #define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 #define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 #define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 #define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 #define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 #define MVPP2_PRS_SRAM_AI_OFFS 90 #define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 #define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 #define MVPP2_PRS_SRAM_AI_MASK 0xff #define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 #define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf #define MVPP2_PRS_SRAM_LU_DONE_BIT 110 #define MVPP2_PRS_SRAM_LU_GEN_BIT 111 /* Sram result info bits assignment */ #define MVPP2_PRS_RI_MAC_ME_MASK 0x1 #define MVPP2_PRS_RI_DSA_MASK 0x2 #define MVPP2_PRS_RI_VLAN_MASK 0xc #define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3)) #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC 
BIT(4) #define MVPP2_PRS_RI_L2_CAST_MASK 0x600 #define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) #define MVPP2_PRS_RI_L2_MCAST BIT(9) #define MVPP2_PRS_RI_L2_BCAST BIT(10) #define MVPP2_PRS_RI_PPPOE_MASK 0x800 #define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000 #define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14)) #define MVPP2_PRS_RI_L3_IP4 BIT(12) #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) #define MVPP2_PRS_RI_L3_IP6 BIT(14) #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) #define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000 #define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16)) #define MVPP2_PRS_RI_L3_MCAST BIT(15) #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 #define MVPP2_PRS_RI_UDF3_MASK 0x300000 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 #define MVPP2_PRS_RI_L4_TCP BIT(22) #define MVPP2_PRS_RI_L4_UDP BIT(23) #define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) #define MVPP2_PRS_RI_UDF7_MASK 0x60000000 #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) #define MVPP2_PRS_RI_DROP_MASK 0x80000000 /* Sram additional info bits assignment */ #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) #define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) #define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) #define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) #define MVPP2_PRS_SINGLE_VLAN_AI 0 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) /* DSA/EDSA type */ #define MVPP2_PRS_TAGGED true #define MVPP2_PRS_UNTAGGED false #define MVPP2_PRS_EDSA true #define MVPP2_PRS_DSA false /* MAC entries, shadow udf */ enum mvpp2_prs_udf { MVPP2_PRS_UDF_MAC_DEF, MVPP2_PRS_UDF_MAC_RANGE, MVPP2_PRS_UDF_L2_DEF, MVPP2_PRS_UDF_L2_DEF_COPY, MVPP2_PRS_UDF_L2_USER, }; /* Lookup ID */ enum mvpp2_prs_lookup { MVPP2_PRS_LU_MH, MVPP2_PRS_LU_MAC, MVPP2_PRS_LU_DSA, MVPP2_PRS_LU_VLAN, 
MVPP2_PRS_LU_L2, MVPP2_PRS_LU_PPPOE, MVPP2_PRS_LU_IP4, MVPP2_PRS_LU_IP6, MVPP2_PRS_LU_FLOWS, MVPP2_PRS_LU_LAST, }; /* L3 cast enum */ enum mvpp2_prs_l3_cast { MVPP2_PRS_L3_UNI_CAST, MVPP2_PRS_L3_MULTI_CAST, MVPP2_PRS_L3_BROAD_CAST }; /* Classifier constants */ #define MVPP2_CLS_FLOWS_TBL_SIZE 512 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 #define MVPP2_CLS_LKP_TBL_SIZE 64 /* BM constants */ #define MVPP2_BM_POOLS_NUM 8 #define MVPP2_BM_LONG_BUF_NUM 1024 #define MVPP2_BM_SHORT_BUF_NUM 2048 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) #define MVPP2_BM_POOL_PTR_ALIGN 128 #define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port) #define MVPP2_BM_SWF_SHORT_POOL 3 /* BM cookie (32 bits) definition */ #define MVPP2_BM_COOKIE_POOL_OFFS 8 #define MVPP2_BM_COOKIE_CPU_OFFS 24 /* BM short pool packet size * These value assure that for SWF the total number * of bytes allocated for each buffer will be 512 */ #define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) enum mvpp2_bm_type { MVPP2_BM_FREE, MVPP2_BM_SWF_LONG, MVPP2_BM_SWF_SHORT }; /* Definitions */ /* Shared Packet Processor resources */ struct mvpp2 { /* Shared registers' base addresses */ void __iomem *base; void __iomem *lms_base; /* Common clocks */ struct clk *pp_clk; struct clk *gop_clk; /* List of pointers to port structures */ struct mvpp2_port **port_list; /* Aggregated TXQs */ struct mvpp2_tx_queue *aggr_txqs; /* BM pools */ struct mvpp2_bm_pool *bm_pools; /* PRS shadow table */ struct mvpp2_prs_shadow *prs_shadow; /* PRS auxiliary table for double vlan entries control */ bool *prs_double_vlans; /* Tclk value */ u32 tclk; }; struct mvpp2_pcpu_stats { struct u64_stats_sync syncp; u64 rx_packets; u64 rx_bytes; u64 tx_packets; u64 tx_bytes; }; struct mvpp2_port { u8 id; int irq; struct mvpp2 *priv; /* Per-port registers' base address */ void __iomem *base; struct mvpp2_rx_queue **rxqs; struct mvpp2_tx_queue **txqs; struct net_device *dev; int pkt_size; u32 pending_cause_rx; struct 
napi_struct napi; /* Flags */ unsigned long flags; u16 tx_ring_size; u16 rx_ring_size; struct mvpp2_pcpu_stats __percpu *stats; struct phy_device *phy_dev; phy_interface_t phy_interface; struct device_node *phy_node; unsigned int link; unsigned int duplex; unsigned int speed; struct mvpp2_bm_pool *pool_long; struct mvpp2_bm_pool *pool_short; /* Index of first port's physical RXQ */ u8 first_rxq; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the * layout of the transmit and reception DMA descriptors, and their * layout is therefore defined by the hardware design */ #define MVPP2_TXD_L3_OFF_SHIFT 0 #define MVPP2_TXD_IP_HLEN_SHIFT 8 #define MVPP2_TXD_L4_CSUM_FRAG BIT(13) #define MVPP2_TXD_L4_CSUM_NOT BIT(14) #define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) #define MVPP2_TXD_PADDING_DISABLE BIT(23) #define MVPP2_TXD_L4_UDP BIT(24) #define MVPP2_TXD_L3_IP6 BIT(26) #define MVPP2_TXD_L_DESC BIT(28) #define MVPP2_TXD_F_DESC BIT(29) #define MVPP2_RXD_ERR_SUMMARY BIT(15) #define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) #define MVPP2_RXD_ERR_CRC 0x0 #define MVPP2_RXD_ERR_OVERRUN BIT(13) #define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) #define MVPP2_RXD_BM_POOL_ID_OFFS 16 #define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) #define MVPP2_RXD_HWF_SYNC BIT(21) #define MVPP2_RXD_L4_CSUM_OK BIT(22) #define MVPP2_RXD_IP4_HEADER_ERR BIT(24) #define MVPP2_RXD_L4_TCP BIT(25) #define MVPP2_RXD_L4_UDP BIT(26) #define MVPP2_RXD_L3_IP4 BIT(28) #define MVPP2_RXD_L3_IP6 BIT(30) #define MVPP2_RXD_BUF_HDR BIT(31) struct mvpp2_tx_desc { u32 command; /* Options used by HW for packet transmitting.*/ u8 packet_offset; /* the offset from the buffer beginning */ u8 phys_txq; /* destination queue ID */ u16 data_size; /* data size of transmitted packet in bytes */ u32 buf_phys_addr; /* physical addr of transmitted buffer */ u32 buf_cookie; /* cookie for access to TX buffer in tx path */ u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ u32 reserved2; /* reserved 
(for future use) */ }; struct mvpp2_rx_desc { u32 status; /* info about received packet */ u16 reserved1; /* parser_info (for future use, PnC) */ u16 data_size; /* size of received packet in bytes */ u32 buf_phys_addr; /* physical address of the buffer */ u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u16 reserved2; /* gem_port_id (for future use, PON) */ u16 reserved3; /* csum_l4 (for future use, PnC) */ u8 reserved4; /* bm_qset (for future use, BM) */ u8 reserved5; u16 reserved6; /* classify_info (for future use, PnC) */ u32 reserved7; /* flow_id (for future use, PnC) */ u32 reserved8; }; /* Per-CPU Tx queue control */ struct mvpp2_txq_pcpu { int cpu; /* Number of Tx DMA descriptors in the descriptor ring */ int size; /* Number of currently used Tx DMA descriptor in the * descriptor ring */ int count; /* Number of Tx DMA descriptors reserved for each CPU */ int reserved_num; /* Array of transmitted skb */ struct sk_buff **tx_skb; /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; /* Index of the TX DMA descriptor to be cleaned up */ int txq_get_index; }; struct mvpp2_tx_queue { /* Physical number of this Tx queue */ u8 id; /* Logical number of this Tx queue */ u8 log_id; /* Number of Tx DMA descriptors in the descriptor ring */ int size; /* Number of currently used Tx DMA descriptor in the descriptor ring */ int count; /* Per-CPU control of physical Tx queues */ struct mvpp2_txq_pcpu __percpu *pcpu; /* Array of transmitted skb */ struct sk_buff **tx_skb; u32 done_pkts_coal; /* Virtual address of thex Tx DMA descriptors array */ struct mvpp2_tx_desc *descs; /* DMA address of the Tx DMA descriptors array */ dma_addr_t descs_phys; /* Index of the last Tx DMA descriptor */ int last_desc; /* Index of the next Tx DMA descriptor to process */ int next_desc_to_proc; }; struct mvpp2_rx_queue { /* RX queue number, in the range 0-31 for physical RXQs */ u8 id; /* Num of rx descriptors in the rx descriptor ring */ int size; u32 
pkts_coal; u32 time_coal; /* Virtual address of the RX DMA descriptors array */ struct mvpp2_rx_desc *descs; /* DMA address of the RX DMA descriptors array */ dma_addr_t descs_phys; /* Index of the last RX DMA descriptor */ int last_desc; /* Index of the next RX DMA descriptor to process */ int next_desc_to_proc; /* ID of port to which physical RXQ is mapped */ int port; /* Port's logic RXQ number to which physical RXQ is mapped */ int logic_rxq; }; union mvpp2_prs_tcam_entry { u32 word[MVPP2_PRS_TCAM_WORDS]; u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; }; union mvpp2_prs_sram_entry { u32 word[MVPP2_PRS_SRAM_WORDS]; u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; }; struct mvpp2_prs_entry { u32 index; union mvpp2_prs_tcam_entry tcam; union mvpp2_prs_sram_entry sram; }; struct mvpp2_prs_shadow { bool valid; bool finish; /* Lookup ID */ int lu; /* User defined offset */ int udf; /* Result info */ u32 ri; u32 ri_mask; }; struct mvpp2_cls_flow_entry { u32 index; u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; }; struct mvpp2_cls_lookup_entry { u32 lkpid; u32 way; u32 data; }; struct mvpp2_bm_pool { /* Pool number in the range 0-7 */ int id; enum mvpp2_bm_type type; /* Buffer Pointers Pool External (BPPE) size */ int size; /* Number of buffers for this pool */ int buf_num; /* Pool buffer size */ int buf_size; /* Packet size */ int pkt_size; /* BPPE virtual base address */ u32 *virt_addr; /* BPPE physical base address */ dma_addr_t phys_addr; /* Ports using BM pool */ u32 port_map; /* Occupied buffers indicator */ atomic_t in_use; int in_use_thresh; spinlock_t lock; }; struct mvpp2_buff_hdr { u32 next_buff_phys_addr; u32 next_buff_virt_addr; u16 byte_count; u16 info; u8 reserved1; /* bm_qset (for future use, BM) */ }; /* Buffer header info bits */ #define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff #define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK) #define MVPP2_B_HDR_INFO_LAST_OFFS 12 #define MVPP2_B_HDR_INFO_LAST_MASK BIT(12) #define MVPP2_B_HDR_INFO_IS_LAST(info) \ ((info & 
MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS) /* Static declaractions */ /* Number of RXQs used by single port */ static int rxq_number = MVPP2_DEFAULT_RXQ; /* Number of TXQs used by single port */ static int txq_number = MVPP2_MAX_TXQ; #define MVPP2_DRIVER_NAME "mvpp2" #define MVPP2_DRIVER_VERSION "1.0" /* Utility/helper methods */ static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) { writel(data, priv->base + offset); } static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) { return readl(priv->base + offset); } static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) { txq_pcpu->txq_get_index++; if (txq_pcpu->txq_get_index == txq_pcpu->size) txq_pcpu->txq_get_index = 0; } static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, struct sk_buff *skb) { txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; } /* Get number of physical egress port */ static inline int mvpp2_egress_port(struct mvpp2_port *port) { return MVPP2_MAX_TCONT + port->id; } /* Get number of physical TXQ */ static inline int mvpp2_txq_phys(int port, int txq) { return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; } /* Parser configuration routines */ /* Update parser tcam and sram hw entries */ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; /* Clear entry invalidation bit */ pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); return 0; } /* Read 
tcam entry from hw */ static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; /* Write tcam index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) return MVPP2_PRS_TCAM_ENTRY_INVALID; for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); return 0; } /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { /* Write index - indirect access */ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), MVPP2_PRS_TCAM_INV_MASK); } /* Enable shadow table entry and set its lookup ID */ static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) { priv->prs_shadow[index].valid = true; priv->prs_shadow[index].lu = lu; } /* Update ri fields in shadow table entry */ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, unsigned int ri, unsigned int ri_mask) { priv->prs_shadow[index].ri_mask = ri_mask; priv->prs_shadow[index].ri = ri; } /* Update lookup field in tcam sw entry */ static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; } /* Update mask for single port in tcam sw entry */ static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, unsigned int port, bool add) { int enable_off = 
MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); if (add) pe->tcam.byte[enable_off] &= ~(1 << port); else pe->tcam.byte[enable_off] |= 1 << port; } /* Update port map in tcam sw entry */ static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, unsigned int ports) { unsigned char port_mask = MVPP2_PRS_PORT_MASK; int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; pe->tcam.byte[enable_off] &= ~port_mask; pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; } /* Obtain port map from tcam sw entry */ static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) { int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; } /* Set byte of data and its enable bits in tcam sw entry */ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char byte, unsigned char enable) { pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; } /* Get byte of data and its enable bits from tcam sw entry */ static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, unsigned int offs, unsigned char *byte, unsigned char *enable) { *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; } /* Compare tcam data bytes with a pattern */ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, u16 data) { int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); u16 tcam_data; tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off]; if (tcam_data != data) return false; return true; } /* Update ai bits in tcam sw entry */ static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int enable) { int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { if (!(enable & BIT(i))) continue; if (bits & BIT(i)) 
pe->tcam.byte[ai_idx] |= 1 << i; else pe->tcam.byte[ai_idx] &= ~(1 << i); } pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; } /* Get ai bits from tcam sw entry */ static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) { return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; } /* Set ethertype in tcam sw entry */ static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, unsigned short ethertype) { mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); } /* Set bits in sram sw entry */ static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, int val) { pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); } /* Clear bits in sram sw entry */ static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, int val) { pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); } /* Update ri bits in sram sw entry */ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { int ri_off = MVPP2_PRS_SRAM_RI_OFFS; if (!(mask & BIT(i))) continue; if (bits & BIT(i)) mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); else mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); } } /* Obtain ri bits from sram sw entry */ static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) { return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; } /* Update ai bits in sram sw entry */ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, unsigned int bits, unsigned int mask) { unsigned int i; int ai_off = MVPP2_PRS_SRAM_AI_OFFS; for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { if (!(mask & BIT(i))) continue; if (bits & BIT(i)) mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); else mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); mvpp2_prs_sram_bits_set(pe, 
MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); } } /* Read ai bits from sram sw entry */ static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) { u8 bits; int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); int ai_en_off = ai_off + 1; int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; bits = (pe->sram.byte[ai_off] >> ai_shift) | (pe->sram.byte[ai_en_off] << (8 - ai_shift)); return bits; } /* In sram sw entry set lookup ID field of the tcam key to be used in the next * lookup interation */ static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) { int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK); mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); } /* In the sram sw entry set sign and value of the next lookup offset * and the offset value generated to the classifier */ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, unsigned int op) { /* Set sign */ if (shift < 0) { mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); shift = 0 - shift; } else { mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); } /* Set value */ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = (unsigned char)shift; /* Reset and set operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); } /* In the sram sw entry set sign and value of the user defined offset * generated to the classifier */ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, unsigned int type, int offset, unsigned int op) { /* Set sign */ if (offset < 0) { mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); offset = 0 - offset; } else { mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); } /* Set value */ 
mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + MVPP2_PRS_SRAM_UDF_BITS)] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + MVPP2_PRS_SRAM_UDF_BITS)] |= (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); /* Set offset type */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, MVPP2_PRS_SRAM_UDF_TYPE_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); /* Set offset operation */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); /* Set base offset as current */ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); } /* Find parser flow entry */ static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) { struct mvpp2_prs_entry *pe; int tid; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return NULL; mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { u8 bits; if (!priv->prs_shadow[tid].valid || priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; pe->index = tid; mvpp2_prs_hw_read(priv, pe); bits = mvpp2_prs_sram_ai_get(pe); /* Sram store classification lookup ID in AI bits [5:0] */ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) return pe; } kfree(pe); return NULL; } /* Return first free tcam index, seeking from start to end */ 
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	/* Accept the range in either order; clamp to the tcam size */
	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	/* A tid is free when its shadow entry has never been marked valid */
	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		/* Terminate the lookup and go to flowid generation */
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
MVPP2_PRS_RI_L2_CAST_MASK); /* Shift to ethertype */ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Accept multicast */ static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, bool add) { struct mvpp2_prs_entry pe; unsigned char da_mc; /* Ethernet multicast address first byte is * 0x01 for IPv4 and 0x33 for IPv6 */ da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33; if (priv->prs_shadow[index].valid) { /* Entry exist - update port only */ pe.index = index; mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); pe.index = index; /* Continue - set next lookup */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); /* Set result info bits */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, MVPP2_PRS_RI_L2_CAST_MASK); /* Update tcam entry data first byte */ mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); /* Shift to ethertype */ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) { struct mvpp2_prs_entry pe; int tid, shift; if (extend) { tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; shift = 8; } else { tid = tagged ? 
MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; shift = 4; } if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ pe.index = tid; mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); if (tagged) { /* Set tagged bit in DSA tag */ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_TCAM_DSA_TAGGED_BIT, MVPP2_PRS_TCAM_DSA_TAGGED_BIT); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); /* If packet is tagged continue check vlans */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); } else { /* Set result info bits to 'no vlans' */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); } /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Set entry for dsa ethertype */ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) { struct mvpp2_prs_entry pe; int tid, shift, port_mask; if (extend) { tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : MVPP2_PE_ETYPE_EDSA_UNTAGGED; port_mask = 0; shift = 8; } else { tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : MVPP2_PE_ETYPE_DSA_UNTAGGED; port_mask = MVPP2_PRS_PORT_MASK; shift = 4; } if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ pe.index = tid; mvpp2_prs_hw_read(priv, &pe); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); pe.index = tid; /* Set ethertype */ mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); mvpp2_prs_match_etype(&pe, 2, 0); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, MVPP2_PRS_RI_DSA_MASK); /* Shift ethertype + 2 byte reserved + tag*/ mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); if (tagged) { /* Set tagged bit in DSA tag */ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN + 2 + 3, MVPP2_PRS_TCAM_DSA_TAGGED_BIT, MVPP2_PRS_TCAM_DSA_TAGGED_BIT); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); /* If packet is tagged continue check vlans */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); } else { /* Set result info bits to 'no vlans' */ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); } /* Mask/unmask all ports, depending on dsa type */ mvpp2_prs_tcam_port_map_set(&pe, port_mask); } /* Update port mask */ mvpp2_prs_tcam_port_set(&pe, port, add); mvpp2_prs_hw_write(priv, &pe); } /* Search for existing single/triple vlan entry */ static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) { struct mvpp2_prs_entry *pe; int tid; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return NULL; mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) { unsigned int ri_bits, ai_bits; bool match; if 
(!priv->prs_shadow[tid].valid || priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; pe->index = tid; mvpp2_prs_hw_read(priv, pe); match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid)); if (!match) continue; /* Get vlan type */ ri_bits = mvpp2_prs_sram_ri_get(pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; /* Get current ai value from tcam */ ai_bits = mvpp2_prs_tcam_ai_get(pe); /* Clear double vlan bit */ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; if (ai != ai_bits) continue; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) return pe; } kfree(pe); return NULL; } /* Add/update single/triple vlan entry */ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, unsigned int port_map) { struct mvpp2_prs_entry *pe; int tid_aux, tid; int ret = 0; pe = mvpp2_prs_vlan_find(priv, tpid, ai); if (!pe) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); if (tid < 0) return tid; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return -ENOMEM; /* Get last double vlan tid */ for (tid_aux = MVPP2_PE_LAST_FREE_TID; tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { unsigned int ri_bits; if (!priv->prs_shadow[tid_aux].valid || priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; pe->index = tid_aux; mvpp2_prs_hw_read(priv, pe); ri_bits = mvpp2_prs_sram_ri_get(pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) break; } if (tid <= tid_aux) { ret = -EINVAL; goto error; } memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); pe->index = tid; mvpp2_prs_match_etype(pe, 0, tpid); mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2); /* Shift 4 bytes - skip 1 vlan tag */ mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Clear all ai bits for next iteration */ mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK); if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { mvpp2_prs_sram_ri_update(pe, 
MVPP2_PRS_RI_VLAN_SINGLE, MVPP2_PRS_RI_VLAN_MASK); } else { ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE, MVPP2_PRS_RI_VLAN_MASK); } mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(pe, port_map); mvpp2_prs_hw_write(priv, pe); error: kfree(pe); return ret; } /* Get first free double vlan ai number */ static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) { int i; for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { if (!priv->prs_double_vlans[i]) return i; } return -EINVAL; } /* Search for existing double vlan entry */ static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, unsigned short tpid2) { struct mvpp2_prs_entry *pe; int tid; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return NULL; mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); /* Go through the all entries with MVPP2_PRS_LU_VLAN */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) { unsigned int ri_mask; bool match; if (!priv->prs_shadow[tid].valid || priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; pe->index = tid; mvpp2_prs_hw_read(priv, pe); match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2)); if (!match) continue; ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK; if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) return pe; } kfree(pe); return NULL; } /* Add or update double vlan entry */ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, unsigned short tpid2, unsigned int port_map) { struct mvpp2_prs_entry *pe; int tid_aux, tid, ai, ret = 0; pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); if (!pe) { /* Create new tcam entry */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; pe = kzalloc(sizeof(*pe), 
GFP_KERNEL); if (!pe) return -ENOMEM; /* Set ai value for new double vlan entry */ ai = mvpp2_prs_double_vlan_ai_free_get(priv); if (ai < 0) { ret = ai; goto error; } /* Get first single/triple vlan tid */ for (tid_aux = MVPP2_PE_FIRST_FREE_TID; tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { unsigned int ri_bits; if (!priv->prs_shadow[tid_aux].valid || priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; pe->index = tid_aux; mvpp2_prs_hw_read(priv, pe); ri_bits = mvpp2_prs_sram_ri_get(pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) break; } if (tid >= tid_aux) { ret = -ERANGE; goto error; } memset(pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); pe->index = tid; priv->prs_double_vlans[ai] = true; mvpp2_prs_match_etype(pe, 0, tpid1); mvpp2_prs_match_etype(pe, 4, tpid2); mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN); /* Shift 8 bytes - skip 2 vlan tags */ mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(pe, port_map); mvpp2_prs_hw_write(priv, pe); error: kfree(pe); return ret; } /* IPv4 header parsing for fragmentation and L4 offset */ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, unsigned int ri, unsigned int ri_mask) { struct mvpp2_prs_entry pe; int tid; if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && (proto != IPPROTO_IGMP)) return -EINVAL; /* Fragmented packet */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = tid; /* Set next lu 
to IPv4 */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); /* Set L4 offset */ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, sizeof(struct iphdr) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); /* Not fragmented packet */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; pe.index = tid; /* Clear ri before updating */ pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); return 0; } /* IPv4 L3 multicast or broadcast */ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) { struct mvpp2_prs_entry pe; int mask, tid; tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); pe.index = tid; switch (l3_cast) { case MVPP2_PRS_L3_MULTI_CAST: mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, MVPP2_PRS_IPV4_MC_MASK); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK); break; case 
MVPP2_PRS_L3_BROAD_CAST: mask = MVPP2_PRS_IPV4_BC_MASK; mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, MVPP2_PRS_RI_L3_ADDR_MASK); break; default: return -EINVAL; } /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Set entries for protocols over IPv6 */ static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, unsigned int ri, unsigned int ri_mask) { struct mvpp2_prs_entry pe; int tid; if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) return -EINVAL; tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, sizeof(struct ipv6hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Write HW */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); 
mvpp2_prs_hw_write(priv, &pe); return 0; } /* IPv6 L3 multicast entry */ static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) { struct mvpp2_prs_entry pe; int tid; if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) return -EINVAL; tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); if (tid < 0) return tid; memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); pe.index = tid; /* Finished: go to flowid generation */ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK); mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Shift back to IPv6 NH */ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, MVPP2_PRS_IPV6_MC_MASK); mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); mvpp2_prs_hw_write(priv, &pe); return 0; } /* Parser per-port initialization */ static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, int lu_max, int offset) { u32 val; /* Set lookup ID */ val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); val &= ~MVPP2_PRS_PORT_LU_MASK(port); val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); /* Set maximum number of loops for packet received from port */ val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); /* Set initial offset for packet header extraction for the first * searching loop */ val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); val &= ~MVPP2_PRS_INIT_OFF_MASK(port); val |= 
MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		/* One reserved entry per port, counting down from the top */
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	/* Skip the Marvell Header, then continue with the MAC lookup */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entires (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}

/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow lu recorded as LU_MAC for this LU_DSA entry -
	 * matches the original source; confirm intent.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all
ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and hand over to the PPPoE lookup */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	/* Also require version 4 and IHL == 5 (no options) */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* Re-uses the previous (IPv4 no-options) entry contents */
	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Configure vlan entries and detect up to 2 successive VLAN tags.
* Possible options: * 0x8100, 0x88A8 * 0x8100, 0x8100 * 0x8100 * 0x88A8 */ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) { struct mvpp2_prs_entry pe; int err; priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), MVPP2_PRS_DBL_VLANS_MAX, GFP_KERNEL); if (!priv->prs_double_vlans) return -ENOMEM; /* Double VLAN: 0x8100, 0x88A8 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, MVPP2_PRS_PORT_MASK); if (err) return err; /* Double VLAN: 0x8100, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); if (err) return err; /* Single VLAN: 0x88a8 */ err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK); if (err) return err; /* Single VLAN: 0x8100 */ err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK); if (err) return err; /* Set default double vlan entry */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_DBL; mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); /* Clear ai for next iterations */ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_DBL_VLAN_AI_BIT); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); mvpp2_prs_hw_write(priv, &pe); /* Set default vlan none entry */ memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); pe.index = MVPP2_PE_VLAN_NONE; mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); /* Unmask all ports */ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); /* Update shadow table and hw entry */ 
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* Re-uses the previous (with-options) entry, narrowing the match */
	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4
Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Hop limit of zero: mark unknown L3 and drop */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	/* NOTE(review): shadow lu recorded as LU_IP4 for this and the next
	 * two IPv6 entries - matches the original source; confirm intent.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam
table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); /* Clear all tcam and sram entries */ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); } /* Invalidate all tcam entries */ for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index); priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, sizeof(struct mvpp2_prs_shadow), GFP_KERNEL); if (!priv->prs_shadow) return -ENOMEM; /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, MVPP2_PRS_PORT_LU_MAX, 0); mvpp2_prs_def_flow_init(priv); mvpp2_prs_mh_init(priv); mvpp2_prs_mac_init(priv); mvpp2_prs_dsa_init(priv); err = mvpp2_prs_etype_init(priv); if (err) return err; err = mvpp2_prs_vlan_init(pdev, priv); if (err) return err; err = mvpp2_prs_pppoe_init(priv); if (err) return err; err = mvpp2_prs_ip6_init(priv); if (err) return err; err = mvpp2_prs_ip4_init(priv); if (err) return err; return 0; } /* Compare MAC DA with tcam entry data */ static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const u8 *da, unsigned char *mask) { unsigned char tcam_byte, tcam_mask; int index; for (index = 0; index < ETH_ALEN; index++) { mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); if (tcam_mask != mask[index]) return false; if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) return false; } return true; } /* Find tcam entry with matched pair <MAC DA, port> */ static struct mvpp2_prs_entry * mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, unsigned char *mask, int udf_type) { struct mvpp2_prs_entry *pe; int tid; pe = kzalloc(sizeof(*pe), 
GFP_KERNEL); if (!pe) return NULL; mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); /* Go through the all entires with MVPP2_PRS_LU_MAC */ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) { unsigned int entry_pmap; if (!priv->prs_shadow[tid].valid || (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || (priv->prs_shadow[tid].udf != udf_type)) continue; pe->index = tid; mvpp2_prs_hw_read(priv, pe); entry_pmap = mvpp2_prs_tcam_port_map_get(pe); if (mvpp2_prs_mac_range_equals(pe, da, mask) && entry_pmap == pmap) return pe; } kfree(pe); return NULL; } /* Update parser's mac da entry */ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, const u8 *da, bool add) { struct mvpp2_prs_entry *pe; unsigned int pmap, len, ri; unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; int tid; /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, MVPP2_PRS_UDF_MAC_DEF); /* No such entry */ if (!pe) { if (!add) return 0; /* Create new TCAM entry */ /* Find first range mac entry*/ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) if (priv->prs_shadow[tid].valid && (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && (priv->prs_shadow[tid].udf == MVPP2_PRS_UDF_MAC_RANGE)) break; /* Go through the all entries from first to last */ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, tid - 1); if (tid < 0) return tid; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return -1; mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); pe->index = tid; /* Mask all ports */ mvpp2_prs_tcam_port_map_set(pe, 0); } /* Update port mask */ mvpp2_prs_tcam_port_set(pe, port, add); /* Invalidate the entry if no ports are left enabled */ pmap = mvpp2_prs_tcam_port_map_get(pe); if (pmap == 0) { if (add) { kfree(pe); return -1; } mvpp2_prs_hw_inv(priv, pe->index); priv->prs_shadow[pe->index].valid = false; kfree(pe); return 0; } /* Continue - set next lookup */ 
mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}

/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Broadcast is kept; only true multicast entries go */
		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}

static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port form EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry not exist */
	if (!pe) {
		/* Go through the all entires from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); pe->index = tid; /* Set flow ID*/ mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); } mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, pe); kfree(pe); return 0; } /* Classifier configuration routines */ /* Update classification flow table registers */ static void mvpp2_cls_flow_write(struct mvpp2 *priv, struct mvpp2_cls_flow_entry *fe) { mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); } /* Update classification lookup table register */ static void mvpp2_cls_lookup_write(struct mvpp2 *priv, struct mvpp2_cls_lookup_entry *le) { u32 val; val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); } /* Classifier default initialization */ static void mvpp2_cls_init(struct mvpp2 *priv) { struct mvpp2_cls_lookup_entry le; struct mvpp2_cls_flow_entry fe; int index; /* Enable classifier */ mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); /* Clear classifier flow table */ memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { fe.index = index; mvpp2_cls_flow_write(priv, &fe); } /* Clear classifier lookup table */ le.data = 0; for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { le.lkpid = index; le.way = 0; mvpp2_cls_lookup_write(priv, &le); le.way = 1; mvpp2_cls_lookup_write(priv, &le); } } static void mvpp2_cls_port_config(struct mvpp2_port *port) { struct mvpp2_cls_lookup_entry le; u32 val; /* Set way for the port */ val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); val 
	    &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	/* Oversize packets are steered to the port's first rxq */
	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	int size_bytes;
	u32 val;

	/* The pool holds "size" 32-bit buffer pointers */
	size_bytes = sizeof(u32) * size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	/* HW requires the pointer array to be aligned; bail out otherwise */
	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	/* Program pool base/size and start the pool */
	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    bm_pool->phys_addr);
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type =
		       MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;
	atomic_set(&bm_pool->in_use, 0);
	spin_lock_init(&bm_pool->lock);

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	/* HW register takes the size rounded up to its granularity */
	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		u32 vaddr;

		/* Get buffer virtual address (indirect access) */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
		if (!vaddr)
			break;
		/* NOTE(review): the cookie is an skb pointer stored in a
		 * 32-bit register - assumes 32-bit virtual addresses.
		 */
		dev_kfree_skb_any((struct sk_buff *)vaddr);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(priv, bm_pool);
	if (bm_pool->buf_num) {
		/* Leave the pool running rather than freeing HW-owned memory */
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	/* Stop the pool before releasing its backing memory */
	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
			  bm_pool->virt_addr,
			  bm_pool->phys_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i,
		size);
	/* Unwind the pools created before the failure */
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
		MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
		MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Allocate skb for BM pool */
static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
				       struct mvpp2_bm_pool *bm_pool,
				       dma_addr_t *buf_phys_addr,
				       gfp_t gfp_mask)
{
	struct sk_buff *skb;
	dma_addr_t phys_addr;

	skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
	if (!skb)
		return NULL;

	/* Map the whole buffer (from skb->head) for device writes */
	phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
				   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	*buf_phys_addr = phys_addr;

	return skb;
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     u32 buf_phys_addr, u32 buf_virt_addr)
{
	/* NOTE(review): virt (cookie) is written before phys; the ordering
	 * appears to be an indirect-access pair requirement - confirm.
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
}

/* Release multicast buffer */
static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
				 u32 buf_phys_addr, u32 buf_virt_addr,
				 int mc_id)
{
	u32 val = 0;

	val |= (mc_id & MVPP2_BM_MC_ID_MASK);
	mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);

	mvpp2_bm_pool_put(port, pool,
			  buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
			  buf_virt_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      u32 phys_addr, u32 cookie)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct sk_buff *skb;
	int i, buf_size, total_size;
	u32 bm;
	dma_addr_t phys_addr;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	/* Refuse requests that would overflow the pool; not an error for
	 * the caller, which checks the returned count.
	 */
	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
	for (i = 0; i < buf_num; i++) {
		skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
		if (!skb)
			break;

		mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
	}

	/* Update BM driver
	   with number of buffers added to pool */
	bm_pool->buf_num += i;
	bm_pool->in_use_thresh = bm_pool->buf_num / 4;

	netdev_dbg(port->dev,
		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "%s pool %d: %d of %d buffers added\n",
		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	unsigned long flags = 0;
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	/* A pool may be shared, but only as a single type */
	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	spin_lock_irqsave(&new_pool->lock, flags);

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			/* We need to undo the bufs_add() allocations */
			spin_unlock_irqrestore(&new_pool->lock, flags);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	spin_unlock_irqrestore(&new_pool->lock, flags);

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	unsigned long flags = 0;
	int rxq;

	if (!port->pool_long) {
		/* One long pool per port, sized for the port's packet size */
		port->pool_long =
		       mvpp2_bm_pool_use(port,
					 MVPP2_BM_SWF_LONG_POOL(port->id),
					 MVPP2_BM_SWF_LONG,
					 port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		spin_lock_irqsave(&port->pool_long->lock, flags);
		port->pool_long->port_map |= (1 << port->id);
		spin_unlock_irqrestore(&port->pool_long->lock, flags);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq,
						port->pool_long->id);
	}

	if (!port->pool_short) {
		/* The short pool is shared by all ports */
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		spin_lock_irqsave(&port->pool_short->lock, flags);
		port->pool_short->port_map |= (1 << port->id);
		spin_unlock_irqrestore(&port->pool_short->lock, flags);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}
	port_pool->pkt_size = pkt_size;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));

	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	/* Build a mask with one bit per present CPU */
	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}

/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
		    (MVPP2_CAUSE_MISC_SUM_MASK |
		     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}

/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fallthrough - RGMII also clears the PCS enable bit */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Advertise flow control during autonegotiation */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	/* GMII loopback only at gigabit speed */
	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* NOTE(review): busy-waits with no timeout until HW clears the
	 * reset bit.
	 */
	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	/* Register field holds (size - MH) / 2 */
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	/* Configure port to loopback if needed */
	if (port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port);

	/* Update TX FIFO MIN Threshold */
	val =
	      readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	/* Min. TX threshold must be less than minimal packet length */
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			   MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &=
		      ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		/* Only queues whose descriptor ring was allocated */
		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors in a single register write.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	/* Warm the cache with the descriptor after the current one */
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in 32-byte units at this point */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		    MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
{
	int pool =
		   (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
		   MVPP2_RXD_BM_POOL_ID_OFFS;
	int cpu = smp_processor_id();

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Indirect access - select the queue first */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}

/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);

	/* Result register reports how many descriptors were reserved */
	val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	/* Keep a per-CPU chunk of headroom in the ring */
	if (desc_count >
	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	/* NOTE(review): wrap-around steps back to last_desc - 1, not
	 * last_desc - confirm intended.
	 */
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		/* Read-to-clear: the read resets the sent counter */
		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger that MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 pkts)
{
	u32 val;

	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
	/* Indirect access - select the queue, then set the threshold */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);

	rxq->pkts_coal = pkts;
}

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq, u32 usec)
{
	u32 val;

	/* Convert microseconds to tclk cycles */
	val = (port->priv->tclk / USEC_PER_SEC) * usec;
	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvpp2_tx_done_pkts_coal_set(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;
	u32 val;

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		val = (txq->done_pkts_coal <<
		       MVPP2_TRANSMITTED_THRESH_OFFSET) &
		       MVPP2_TRANSMITTED_THRESH_MASK;
		mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
		mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
	}
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_tx_desc *tx_desc = txq->descs +
						txq_pcpu->txq_get_index;
		struct sk_buff *skb =
			txq_pcpu->tx_skb[txq_pcpu->txq_get_index];

		mvpp2_txq_inc_get(txq_pcpu);

		/* NOTE(review): NULL tx_skb slots are skipped but still
		 * consume a descriptor - presumably fragment descriptors
		 * with no skb attached; confirm against the xmit path.
		 */
		if (!skb)
			continue;

		dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	/* Highest set bit in the cause bitmap selects the queue */
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	/* Tx cause bits occupy the upper 16 bits */
	int queue = fls(cause >> 16) - 1;

	return port->txqs[queue];
}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
    struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
    int tx_done;

    /* txq_pcpu state is only valid on its owning CPU */
    if (txq_pcpu->cpu != smp_processor_id())
        netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

    tx_done = mvpp2_txq_sent_desc_proc(port, txq);
    if (!tx_done)
        return;
    mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

    txq_pcpu->count -= tx_done;

    /* Wake the queue once there is room for a maximally-fragmented skb */
    if (netif_tx_queue_stopped(nq))
        if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
            netif_tx_wake_queue(nq);
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
                   struct mvpp2_tx_queue *aggr_txq,
                   int desc_num, int cpu,
                   struct mvpp2 *priv)
{
    /* Allocate memory for TX descriptors */
    aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
                desc_num * MVPP2_DESC_ALIGNED_SIZE,
                &aggr_txq->descs_phys, GFP_KERNEL);
    if (!aggr_txq->descs)
        return -ENOMEM;

    /* Make sure descriptor address is cache line size aligned */
    BUG_ON(aggr_txq->descs !=
           PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

    aggr_txq->last_desc = aggr_txq->size - 1;

    /* Aggr TXQ no reset WA: resume from the index HW is already at */
    aggr_txq->next_desc_to_proc = mvpp2_read(priv,
                         MVPP2_AGGR_TXQ_INDEX_REG(cpu));

    /* Set Tx descriptors queue starting address */
    /* indirect access */
    mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
            aggr_txq->descs_phys);
    mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

    return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
              struct mvpp2_rx_queue *rxq)
{
    rxq->size = port->rx_ring_size;

    /* Allocate memory for RX descriptors */
    rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
                    rxq->size * MVPP2_DESC_ALIGNED_SIZE,
                    &rxq->descs_phys, GFP_KERNEL);
    if (!rxq->descs)
        return -ENOMEM;

    BUG_ON(rxq->descs !=
           PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

    rxq->last_desc = rxq->size - 1;

    /* Zero occupied and non-occupied counters - direct access */
    mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

    /* Set Rx
 descriptors queue starting address - indirect access */
    mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
    mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
    mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
    mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

    /* Set Offset */
    mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

    /* Set coalescing pkts and time */
    mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
    mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);

    /* Add number of descriptors ready for receiving packets */
    mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

    return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
                struct mvpp2_rx_queue *rxq)
{
    int rx_received, i;

    rx_received = mvpp2_rxq_received(port, rxq->id);
    if (!rx_received)
        return;

    /* Return every pending buffer to its buffer-manager pool */
    for (i = 0; i < rx_received; i++) {
        struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
        u32 bm = mvpp2_bm_cookie_build(rx_desc);

        mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
                  rx_desc->buf_cookie);
    }
    mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
                 struct mvpp2_rx_queue *rxq)
{
    mvpp2_rxq_drop_pkts(port, rxq);

    if (rxq->descs)
        dma_free_coherent(port->dev->dev.parent,
                  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
                  rxq->descs, rxq->descs_phys);

    rxq->descs = NULL;
    rxq->last_desc = 0;
    rxq->next_desc_to_proc = 0;
    rxq->descs_phys = 0;

    /* Clear Rx descriptors queue starting address and size;
     * free descriptor number
     */
    mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
    mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
    mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
    mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
              struct mvpp2_tx_queue *txq)
{
    u32 val;
    int cpu, desc, desc_per_txq, tx_port_num;
    struct mvpp2_txq_pcpu *txq_pcpu;
    txq->size = port->tx_ring_size;

    /* Allocate memory for Tx descriptors */
    txq->descs = dma_alloc_coherent(port->dev->dev.parent,
                    txq->size * MVPP2_DESC_ALIGNED_SIZE,
                    &txq->descs_phys, GFP_KERNEL);
    if (!txq->descs)
        return -ENOMEM;

    /* Make sure descriptor address is cache line size aligned */
    BUG_ON(txq->descs !=
           PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

    txq->last_desc = txq->size - 1;

    /* Set Tx descriptors queue starting address - indirect access */
    mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
    mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
    mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
                         MVPP2_TXQ_DESC_SIZE_MASK);
    mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
    mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
            txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
    val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
    val &= ~MVPP2_TXQ_PENDING_MASK;
    mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

    /* Calculate base address in prefetch buffer. We reserve 16 descriptors
     * for each existing TXQ.
* TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS */ desc_per_txq = 16; desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq); mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq/2)); /* WRR / EJP configuration - indirect access */ tx_port_num = mvpp2_egress_port(port); mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); val = MVPP2_TXQ_TOKEN_SIZE_MAX; mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val); for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); txq_pcpu->size = txq->size; txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * sizeof(*txq_pcpu->tx_skb), GFP_KERNEL); if (!txq_pcpu->tx_skb) { dma_free_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); return -ENOMEM; } txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; } return 0; } /* Free allocated TXQ resources */ static void mvpp2_txq_deinit(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; int cpu; for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->tx_skb); } if (txq->descs) dma_free_coherent(port->dev->dev.parent, txq->size * MVPP2_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; txq->descs_phys = 0; /* Set minimum bandwidth for disabled TXQs */ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ 
    mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
    mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
    mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
    struct mvpp2_txq_pcpu *txq_pcpu;
    int delay, pending, cpu;
    u32 val;

    /* Enable the drain bit so HW flushes the queue */
    mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
    val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
    val |= MVPP2_TXQ_DRAIN_EN_MASK;
    mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

    /* The napi queue has been stopped so wait for all packets
     * to be transmitted.
     */
    delay = 0;
    do {
        if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
            netdev_warn(port->dev,
                    "port %d: cleaning queue %d timed out\n",
                    port->id, txq->log_id);
            break;
        }
        mdelay(1);
        delay++;

        pending = mvpp2_txq_pend_desc_num_get(port, txq);
    } while (pending);

    val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
    mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

    for_each_present_cpu(cpu) {
        txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

        /* Release all packets */
        mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

        /* Reset queue */
        txq_pcpu->count = 0;
        txq_pcpu->txq_put_index = 0;
        txq_pcpu->txq_get_index = 0;
    }
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
    struct mvpp2_tx_queue *txq;
    int queue;
    u32 val;

    val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

    /* Reset Tx ports and delete Tx queues */
    val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
    mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

    for (queue = 0; queue < txq_number; queue++) {
        txq = port->txqs[queue];
        mvpp2_txq_clean(port, txq);
        mvpp2_txq_deinit(port, txq);
    }

    on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

    /* Take the port back out of flush/reset */
    val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
    mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
    int queue;

    for (queue = 0; queue < rxq_number; queue++)
        mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
    int queue, err;

    for (queue = 0; queue < rxq_number; queue++) {
        err = mvpp2_rxq_init(port, port->rxqs[queue]);
        if (err)
            goto err_cleanup;
    }
    return 0;

err_cleanup:
    mvpp2_cleanup_rxqs(port);
    return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
    struct mvpp2_tx_queue *txq;
    int queue, err;

    for (queue = 0; queue < txq_number; queue++) {
        txq = port->txqs[queue];
        err = mvpp2_txq_init(port, txq);
        if (err)
            goto err_cleanup;
    }

    on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
    on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
    return 0;

err_cleanup:
    mvpp2_cleanup_txqs(port);
    return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
    struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

    /* Mask further interrupts; NAPI poll re-enables them when done */
    mvpp2_interrupts_disable(port);

    napi_schedule(&port->napi);

    return IRQ_HANDLED;
}

/* Adjust link: phylib callback propagating PHY state to the GMAC */
static void mvpp2_link_event(struct net_device *dev)
{
    struct mvpp2_port *port = netdev_priv(dev);
    struct phy_device *phydev = port->phy_dev;
    int status_change = 0;
    u32 val;

    if (phydev->link) {
        if ((port->speed != phydev->speed) ||
            (port->duplex != phydev->duplex)) {
            u32 val;

            /* Reprogram speed/duplex in the GMAC config */
            val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
            val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
                 MVPP2_GMAC_CONFIG_GMII_SPEED |
                 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
                 MVPP2_GMAC_AN_SPEED_EN |
                 MVPP2_GMAC_AN_DUPLEX_EN);

            if (phydev->duplex)
                val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

            if (phydev->speed == SPEED_1000)
                val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
            else if (phydev->speed == SPEED_100)
                val |= MVPP2_GMAC_CONFIG_MII_SPEED;

            writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

            port->duplex = phydev->duplex;
            port->speed = phydev->speed;
        }
    }

    if (phydev->link != port->link) {
        if (!phydev->link) {
            port->duplex = -1;
            port->speed = 0;
        }

        port->link = phydev->link;
        status_change = 1;
    }

    if (status_change) {
        if (phydev->link) {
            val = readl(port->base +
                    MVPP2_GMAC_AUTONEG_CONFIG);
            val |= (MVPP2_GMAC_FORCE_LINK_PASS |
                MVPP2_GMAC_FORCE_LINK_DOWN);
            writel(val, port->base +
                    MVPP2_GMAC_AUTONEG_CONFIG);
            mvpp2_egress_enable(port);
            mvpp2_ingress_enable(port);
        } else {
            mvpp2_ingress_disable(port);
            mvpp2_egress_disable(port);
        }
        phy_print_status(phydev);
    }
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
               struct mvpp2_rx_desc *rx_desc)
{
    u32 status = rx_desc->status;

    switch (status & MVPP2_RXD_ERR_CODE_MASK) {
    case MVPP2_RXD_ERR_CRC:
        netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
               status, rx_desc->data_size);
        break;
    case MVPP2_RXD_ERR_OVERRUN:
        netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
               status, rx_desc->data_size);
        break;
    case MVPP2_RXD_ERR_RESOURCE:
        netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
               status, rx_desc->data_size);
        break;
    }
}

/* Handle RX checksum offload: trust the HW checksum only for IPv4
 * without header errors or IPv6, with a good L4 (TCP/UDP) checksum.
 */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
              struct sk_buff *skb)
{
    if (((status & MVPP2_RXD_L3_IP4) &&
         !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
        (status & MVPP2_RXD_L3_IP6))
        if (((status & MVPP2_RXD_L4_UDP) ||
             (status & MVPP2_RXD_L4_TCP)) &&
             (status & MVPP2_RXD_L4_CSUM_OK)) {
            skb->csum = 0;
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            return;
        }

    skb->ip_summed = CHECKSUM_NONE;
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
               struct mvpp2_bm_pool *bm_pool,
               u32 bm, int is_recycle)
{
    struct sk_buff *skb;
    dma_addr_t phys_addr;

    if (is_recycle &&
        (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
        return 0;

    /* No recycle or too many buffers are in use, so allocate a new skb */
    skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
    if (!skb)
        return -ENOMEM;

    mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
    atomic_dec(&bm_pool->in_use);
    return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port
 *port, struct sk_buff *skb)
{
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        int ip_hdr_len = 0;
        u8 l4_proto;

        if (skb->protocol == htons(ETH_P_IP)) {
            struct iphdr *ip4h = ip_hdr(skb);

            /* Calculate IPv4 checksum and L4 checksum */
            ip_hdr_len = ip4h->ihl;
            l4_proto = ip4h->protocol;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
            struct ipv6hdr *ip6h = ipv6_hdr(skb);

            /* Read l4_protocol from one of IPv6 extra headers */
            if (skb_network_header_len(skb) > 0)
                ip_hdr_len = (skb_network_header_len(skb) >> 2);
            l4_proto = ip6h->nexthdr;
        } else {
            return MVPP2_TXD_L4_CSUM_NOT;
        }

        return mvpp2_txq_desc_csum(skb_network_offset(skb),
                skb->protocol, ip_hdr_len, l4_proto);
    }

    /* No HW checksum requested for this skb */
    return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}

/* Walk a HW buffer-header chain (multi-buffer frame) and return every
 * buffer in the chain to its multicast buffer-manager pool.
 */
static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
                  struct mvpp2_rx_desc *rx_desc)
{
    struct mvpp2_buff_hdr *buff_hdr;
    struct sk_buff *skb;
    u32 rx_status = rx_desc->status;
    u32 buff_phys_addr;
    u32 buff_virt_addr;
    u32 buff_phys_addr_next;
    u32 buff_virt_addr_next;
    int mc_id;
    int pool_id;

    pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
           MVPP2_RXD_BM_POOL_ID_OFFS;
    buff_phys_addr = rx_desc->buf_phys_addr;
    buff_virt_addr = rx_desc->buf_cookie;

    do {
        skb = (struct sk_buff *)buff_virt_addr;
        buff_hdr = (struct mvpp2_buff_hdr *)skb->head;

        mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);

        /* Save the link before releasing the current buffer */
        buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
        buff_virt_addr_next = buff_hdr->next_buff_virt_addr;

        /* Release buffer */
        mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
                     buff_virt_addr, mc_id);

        buff_phys_addr = buff_phys_addr_next;
        buff_virt_addr = buff_virt_addr_next;

    } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
            struct mvpp2_rx_queue *rxq)
{
    struct net_device *dev = port->dev;
    int rx_received, rx_filled, i;
    u32 rcvd_pkts = 0;
    u32 rcvd_bytes = 0;

    /* Get number of received packets and clamp the to-do */
    rx_received = mvpp2_rxq_received(port, rxq->id);
    if (rx_todo > rx_received)
        rx_todo = rx_received;

    rx_filled = 0;
    for (i = 0; i < rx_todo; i++) {
        struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
        struct mvpp2_bm_pool *bm_pool;
        struct sk_buff *skb;
        u32 bm, rx_status;
        int pool, rx_bytes, err;

        rx_filled++;
        rx_status = rx_desc->status;
        rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;

        bm = mvpp2_bm_cookie_build(rx_desc);
        pool = mvpp2_bm_cookie_pool_get(bm);
        bm_pool = &port->priv->bm_pools[pool];
        /* Check if buffer header is used */
        if (rx_status & MVPP2_RXD_BUF_HDR) {
            mvpp2_buff_hdr_rx(port, rx_desc);
            continue;
        }

        /* In case of an error, release the requested buffer pointer
         * to the Buffer Manager. This request process is controlled
         * by the hardware, and the information about the buffer is
         * comprised by the RX descriptor.
         */
        if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
            dev->stats.rx_errors++;
            mvpp2_rx_error(port, rx_desc);
            mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
                      rx_desc->buf_cookie);
            continue;
        }

        /* The BM cookie is the virtual address of the skb */
        skb = (struct sk_buff *)rx_desc->buf_cookie;

        rcvd_pkts++;
        rcvd_bytes += rx_bytes;
        atomic_inc(&bm_pool->in_use);

        skb_reserve(skb, MVPP2_MH_SIZE);
        skb_put(skb, rx_bytes);
        skb->protocol = eth_type_trans(skb, dev);
        mvpp2_rx_csum(port, rx_status, skb);

        napi_gro_receive(&port->napi, skb);

        err = mvpp2_rx_refill(port, bm_pool, bm, 0);
        if (err) {
            netdev_err(port->dev, "failed to refill BM pools\n");
            rx_filled--;
        }
    }

    if (rcvd_pkts) {
        struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets += rcvd_pkts;
        stats->rx_bytes += rcvd_bytes;
        u64_stats_update_end(&stats->syncp);
    }

    /* Update Rx queue management counters */
    wmb();
    mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);

    return rx_todo;
}

/* Unmap a Tx descriptor's buffer and return the descriptor to the queue */
static inline void tx_desc_unmap_put(struct device *dev,
                     struct mvpp2_tx_queue *txq,
                     struct mvpp2_tx_desc *desc)
{
    dma_unmap_single(dev, desc->buf_phys_addr,
             desc->data_size, DMA_TO_DEVICE);
    mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int
 mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
              struct mvpp2_tx_queue *aggr_txq,
              struct mvpp2_tx_queue *txq)
{
    struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
    struct mvpp2_tx_desc *tx_desc;
    int i;
    dma_addr_t buf_phys_addr;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        void *addr = page_address(frag->page.p) + frag->page_offset;

        tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
        tx_desc->phys_txq = txq->id;
        tx_desc->data_size = frag->size;

        buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
                           tx_desc->data_size,
                           DMA_TO_DEVICE);
        if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
            mvpp2_txq_desc_put(txq);
            goto error;
        }

        tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
        tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);

        if (i == (skb_shinfo(skb)->nr_frags - 1)) {
            /* Last descriptor */
            tx_desc->command = MVPP2_TXD_L_DESC;
            mvpp2_txq_inc_put(txq_pcpu, skb);
        } else {
            /* Descriptor in the middle: Not First, Not Last */
            tx_desc->command = 0;
            mvpp2_txq_inc_put(txq_pcpu, NULL);
        }
    }

    return 0;

error:
    /* Release all descriptors that were used to map fragments of
     * this packet, as well as the corresponding DMA mappings
     */
    /* NOTE(review): the fragment descriptors above were obtained from
     * aggr_txq via mvpp2_txq_next_desc_get(aggr_txq), yet this unwind
     * indexes txq->descs by fragment number - verify these really are
     * the descriptors whose buffers were mapped.
     */
    for (i = i - 1; i >= 0; i--) {
        tx_desc = txq->descs + i;
        tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
    }

    return -ENOMEM;
}

/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct mvpp2_port *port = netdev_priv(dev);
    struct mvpp2_tx_queue *txq, *aggr_txq;
    struct mvpp2_txq_pcpu *txq_pcpu;
    struct mvpp2_tx_desc *tx_desc;
    dma_addr_t buf_phys_addr;
    int frags = 0;
    u16 txq_id;
    u32 tx_cmd;

    txq_id = skb_get_queue_mapping(skb);
    txq = port->txqs[txq_id];
    txq_pcpu = this_cpu_ptr(txq->pcpu);
    aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

    frags = skb_shinfo(skb)->nr_frags + 1;

    /* Check number of available descriptors */
    if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
        mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
                         txq_pcpu, frags)) {
        frags = 0;
        goto out;
    }

    /* Get a descriptor for the first part of the packet */
    tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
    tx_desc->phys_txq = txq->id;
    tx_desc->data_size = skb_headlen(skb);

    buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
                       tx_desc->data_size, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
        mvpp2_txq_desc_put(txq);
        frags = 0;
        goto out;
    }
    tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
    tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;

    tx_cmd = mvpp2_skb_tx_csum(port, skb);

    if (frags == 1) {
        /* First and Last descriptor */
        tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
        tx_desc->command = tx_cmd;
        mvpp2_txq_inc_put(txq_pcpu, skb);
    } else {
        /* First but not Last */
        tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
        tx_desc->command = tx_cmd;
        mvpp2_txq_inc_put(txq_pcpu, NULL);

        /* Continue with other skb fragments */
        if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
            tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
            frags = 0;
            goto out;
        }
    }

    txq_pcpu->reserved_num -= frags;
    txq_pcpu->count += frags;
    aggr_txq->count += frags;

    /* Enable transmit: ensure descriptor writes are visible to HW first */
    wmb();
    mvpp2_aggr_txq_pend_desc_add(port, frags);

    /* Stop the queue if there is no room for another full-size skb */
    if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
        struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

        netif_tx_stop_queue(nq);
    }
out:
    if (frags > 0) {
        struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += skb->len;
        u64_stats_update_end(&stats->syncp);
    } else {
        /* frags == 0 means the packet could not be queued: drop it */
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
    }

    return NETDEV_TX_OK;
}

/* Log the error conditions flagged in the misc cause bits */
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
    if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
        netdev_err(dev, "FCS error\n");
    if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
        netdev_err(dev, "rx fifo overrun error\n");
    if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
        netdev_err(dev, "tx fifo underrun error\n");
}

/* Per-CPU helper (on_each_cpu callback): reap completed Tx descriptors
 * and handle misc error causes for this CPU's cause register.
 */
static void mvpp2_txq_done_percpu(void *arg)
{
    struct mvpp2_port *port = arg;
    u32 cause_rx_tx, cause_tx, cause_misc;

    /* Rx/Tx cause register
     *
     * Bits 0-15: each bit indicates received packets on the Rx queue
     * (bit 0 is for Rx queue 0).
     *
     * Bits 16-23: each bit indicates transmitted packets on the Tx queue
     * (bit 16 is for Tx queue 0).
     *
     * Each CPU has its own Rx/Tx cause register
     */
    cause_rx_tx = mvpp2_read(port->priv,
                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
    cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
    cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

    if (cause_misc) {
        mvpp2_cause_error(port->dev, cause_misc);

        /* Clear the cause register */
        mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
        mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
                cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
    }

    /* Release TX descriptors */
    if (cause_tx) {
        struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
        struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

        if (txq_pcpu->count)
            mvpp2_txq_done(port, txq, txq_pcpu);
    }
}

/* NAPI poll: run per-CPU Tx completion, then process Rx within budget */
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
    u32 cause_rx_tx, cause_rx;
    int rx_done = 0;
    struct mvpp2_port *port = netdev_priv(napi->dev);

    on_each_cpu(mvpp2_txq_done_percpu, port, 1);

    cause_rx_tx = mvpp2_read(port->priv,
                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
    cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

    /* Process RX packets */
    cause_rx |= port->pending_cause_rx;
    while (cause_rx && budget > 0) {
        int count;
        struct mvpp2_rx_queue *rxq;

        rxq = mvpp2_get_rx_queue(port, cause_rx);
        if (!rxq)
            break;

        count = mvpp2_rx(port, budget, rxq);
        rx_done += count;
        budget -= count;
        if (budget > 0) {
            /* Clear the bit associated to this Rx queue
             * so that next iteration will continue from
             * the next Rx queue.
             */
            cause_rx &= ~(1 << rxq->logic_rxq);
        }
    }

    if (budget > 0) {
        /* All work done: complete NAPI and re-enable interrupts */
        cause_rx = 0;
        napi_complete(napi);

        mvpp2_interrupts_enable(port);
    }

    /* Remember un-serviced Rx queues for the next poll */
    port->pending_cause_rx = cause_rx;
    return rx_done;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
    mvpp2_gmac_max_rx_size_set(port);
    mvpp2_txp_max_tx_size_set(port);

    napi_enable(&port->napi);

    /* Enable interrupts on all CPUs */
    mvpp2_interrupts_enable(port);

    mvpp2_port_enable(port);
    phy_start(port->phy_dev);
    netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
    /* Stop new packets from arriving to RXQs */
    mvpp2_ingress_disable(port);

    mdelay(10);

    /* Disable interrupts on all CPUs */
    mvpp2_interrupts_disable(port);

    napi_disable(&port->napi);

    netif_carrier_off(port->dev);
    netif_tx_stop_all_queues(port->dev);

    mvpp2_egress_disable(port);
    mvpp2_port_disable(port);
    phy_stop(port->phy_dev);
}

/* Return positive if MTU is valid; may round the value to a legal one */
static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
{
    if (mtu < 68) {
        netdev_err(dev, "cannot change mtu to less than 68\n");
        return -EINVAL;
    }

    /* 9676 == 9700 - 20 and rounding to 8 */
    if (mtu > 9676) {
        netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
        mtu = 9676;
    }

    if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
        netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
                ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
        mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
    }

    return mtu;
}

/* Validate and, if needed, round ethtool ring sizes to HW constraints */
static int mvpp2_check_ringparam_valid(struct net_device *dev,
                       struct ethtool_ringparam *ring)
{
    u16 new_rx_pending = ring->rx_pending;
    u16 new_tx_pending = ring->tx_pending;

    if (ring->rx_pending == 0 || ring->tx_pending == 0)
        return -EINVAL;

    if (ring->rx_pending > MVPP2_MAX_RXD)
        new_rx_pending = MVPP2_MAX_RXD;
    else if (!IS_ALIGNED(ring->rx_pending, 16))
        new_rx_pending = ALIGN(ring->rx_pending, 16);

    if (ring->tx_pending > MVPP2_MAX_TXD)
        new_tx_pending = MVPP2_MAX_TXD;
    else if
        (!IS_ALIGNED(ring->tx_pending, 32))
        new_tx_pending = ALIGN(ring->tx_pending, 32);

    if (ring->rx_pending != new_rx_pending) {
        netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
                ring->rx_pending, new_rx_pending);
        ring->rx_pending = new_rx_pending;
    }

    if (ring->tx_pending != new_tx_pending) {
        netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
                ring->tx_pending, new_tx_pending);
        ring->tx_pending = new_tx_pending;
    }

    return 0;
}

/* Read the port MAC address from the GMAC/LMS source-address registers */
static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
    u32 mac_addr_l, mac_addr_m, mac_addr_h;

    mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
    mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
    mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
    addr[0] = (mac_addr_h >> 24) & 0xFF;
    addr[1] = (mac_addr_h >> 16) & 0xFF;
    addr[2] = (mac_addr_h >> 8) & 0xFF;
    addr[3] = mac_addr_h & 0xFF;
    addr[4] = mac_addr_m & 0xFF;
    addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

/* Connect to the PHY described in the device tree */
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
    struct phy_device *phy_dev;

    phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
                 port->phy_interface);
    if (!phy_dev) {
        netdev_err(port->dev, "cannot connect to phy\n");
        return -ENODEV;
    }
    phy_dev->supported &= PHY_GBIT_FEATURES;
    phy_dev->advertising = phy_dev->supported;

    port->phy_dev = phy_dev;
    port->link = 0;
    port->duplex = 0;
    port->speed = 0;

    return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
    phy_disconnect(port->phy_dev);
    port->phy_dev = NULL;
}

/* ndo_open: program the parser, allocate queues, request the IRQ and
 * start the port.
 */
static int mvpp2_open(struct net_device *dev)
{
    struct mvpp2_port *port = netdev_priv(dev);
    unsigned char mac_bcast[ETH_ALEN] = {
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    int err;

    err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
    if (err) {
        netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
        return err;
    }
    err = mvpp2_prs_mac_da_accept(port->priv, port->id,
                      dev->dev_addr, true);
    if (err) {
        netdev_err(dev, "mvpp2_prs_mac_da_accept MC
 failed\n");
        return err;
    }
    err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
    if (err) {
        netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
        return err;
    }
    err = mvpp2_prs_def_flow(port);
    if (err) {
        netdev_err(dev, "mvpp2_prs_def_flow failed\n");
        return err;
    }

    /* Allocate the Rx/Tx queues */
    err = mvpp2_setup_rxqs(port);
    if (err) {
        netdev_err(port->dev, "cannot allocate Rx queues\n");
        return err;
    }

    err = mvpp2_setup_txqs(port);
    if (err) {
        netdev_err(port->dev, "cannot allocate Tx queues\n");
        goto err_cleanup_rxqs;
    }

    err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
    if (err) {
        netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
        goto err_cleanup_txqs;
    }

    /* In default link is down */
    netif_carrier_off(port->dev);

    err = mvpp2_phy_connect(port);
    if (err < 0)
        goto err_free_irq;

    /* Unmask interrupts on all CPUs */
    on_each_cpu(mvpp2_interrupts_unmask, port, 1);

    mvpp2_start_dev(port);

    return 0;

err_free_irq:
    free_irq(port->irq, port);
err_cleanup_txqs:
    mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
    mvpp2_cleanup_rxqs(port);
    return err;
}

/* ndo_stop: reverse of mvpp2_open */
static int mvpp2_stop(struct net_device *dev)
{
    struct mvpp2_port *port = netdev_priv(dev);

    mvpp2_stop_dev(port);
    mvpp2_phy_disconnect(port);

    /* Mask interrupts on all CPUs */
    on_each_cpu(mvpp2_interrupts_mask, port, 1);

    free_irq(port->irq, port);
    mvpp2_cleanup_rxqs(port);
    mvpp2_cleanup_txqs(port);

    return 0;
}

/* ndo_set_rx_mode: sync promisc/allmulti flags and mcast list into
 * the HW parser.
 */
static void mvpp2_set_rx_mode(struct net_device *dev)
{
    struct mvpp2_port *port = netdev_priv(dev);
    struct mvpp2 *priv = port->priv;
    struct netdev_hw_addr *ha;
    int id = port->id;
    bool allmulti = dev->flags & IFF_ALLMULTI;

    mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
    mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
    mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

    /* Remove all port->id's mcast entries */
    mvpp2_prs_mcast_del_all(priv, id);

    if (allmulti && !netdev_mc_empty(dev)) {
        netdev_for_each_mc_addr(ha, dev)
            mvpp2_prs_mac_da_accept(priv, id,
                        ha->addr, true);
    }
}

/* ndo_set_mac_address: update the parser; on failure try to restore
 * the previous address before reporting the error.
 */
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
    struct mvpp2_port *port = netdev_priv(dev);
    const struct sockaddr *addr = p;
    int err;

    if (!is_valid_ether_addr(addr->sa_data)) {
        err = -EADDRNOTAVAIL;
        goto error;
    }

    if (!netif_running(dev)) {
        err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
        if (!err)
            return 0;
        /* Reconfigure parser to accept the original MAC address */
        err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
        if (err)
            goto error;
    }

    mvpp2_stop_dev(port);

    err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
    if (!err)
        goto out_start;

    /* Reconfigure parser accept the original MAC address */
    err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
    if (err)
        goto error;
out_start:
    mvpp2_start_dev(port);
    mvpp2_egress_enable(port);
    mvpp2_ingress_enable(port);
    return 0;

error:
    netdev_err(dev, "fail to change MAC address\n");
    return err;
}

/* ndo_change_mtu: update BM pools; on failure try to restore the old
 * MTU before reporting the error.
 */
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
    struct mvpp2_port *port = netdev_priv(dev);
    int err;

    mtu = mvpp2_check_mtu_valid(dev, mtu);
    if (mtu < 0) {
        err = mtu;
        goto error;
    }

    if (!netif_running(dev)) {
        err = mvpp2_bm_update_mtu(dev, mtu);
        if (!err) {
            port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
            return 0;
        }

        /* Reconfigure BM to the original MTU */
        err = mvpp2_bm_update_mtu(dev, dev->mtu);
        if (err)
            goto error;
    }

    mvpp2_stop_dev(port);

    err = mvpp2_bm_update_mtu(dev, mtu);
    if (!err) {
        port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
        goto out_start;
    }

    /* Reconfigure BM to the original MTU */
    err = mvpp2_bm_update_mtu(dev, dev->mtu);
    if (err)
        goto error;

out_start:
    mvpp2_start_dev(port);
    mvpp2_egress_enable(port);
    mvpp2_ingress_enable(port);

    return 0;

error:
    netdev_err(dev, "fail to change MTU\n");
    return err;
}

/* ndo_get_stats64: aggregate per-CPU counters under the u64_stats seqlock */
static struct rtnl_link_stats64 *
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
    struct mvpp2_port *port = netdev_priv(dev);
    unsigned int start;
    int cpu;

    for_each_possible_cpu(cpu) {
        struct mvpp2_pcpu_stats *cpu_stats;
        u64 rx_packets;
        u64 rx_bytes;
        u64
 tx_packets;
        u64 tx_bytes;

        cpu_stats = per_cpu_ptr(port->stats, cpu);
        do {
            start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
            rx_packets = cpu_stats->rx_packets;
            rx_bytes = cpu_stats->rx_bytes;
            tx_packets = cpu_stats->tx_packets;
            tx_bytes = cpu_stats->tx_bytes;
        } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

        stats->rx_packets += rx_packets;
        stats->rx_bytes += rx_bytes;
        stats->tx_packets += tx_packets;
        stats->tx_bytes += tx_bytes;
    }

    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped = dev->stats.tx_dropped;

    return stats;
}

/* ndo_do_ioctl: forward MII ioctls to the PHY */
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct mvpp2_port *port = netdev_priv(dev);
    int ret;

    if (!port->phy_dev)
        return -ENOTSUPP;

    ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
    if (!ret)
        mvpp2_link_event(dev);

    return ret;
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_get_settings(struct net_device *dev,
                      struct ethtool_cmd *cmd)
{
    struct mvpp2_port *port = netdev_priv(dev);

    if (!port->phy_dev)
        return -ENODEV;
    return phy_ethtool_gset(port->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
static int mvpp2_ethtool_set_settings(struct net_device *dev,
                      struct ethtool_cmd *cmd)
{
    struct mvpp2_port *port = netdev_priv(dev);

    if (!port->phy_dev)
        return -ENODEV;
    return phy_ethtool_sset(port->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
                      struct ethtool_coalesce *c)
{
    struct mvpp2_port *port = netdev_priv(dev);
    int queue;

    for (queue = 0; queue < rxq_number; queue++) {
        struct mvpp2_rx_queue *rxq = port->rxqs[queue];

        rxq->time_coal = c->rx_coalesce_usecs;
        rxq->pkts_coal = c->rx_max_coalesced_frames;
        mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
        mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
    }

    for (queue = 0; queue < txq_number; queue++) {
        struct mvpp2_tx_queue *txq = port->txqs[queue];
txq->done_pkts_coal = c->tx_max_coalesced_frames; } on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); return 0; } /* get coalescing for ethtools */ static int mvpp2_ethtool_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct mvpp2_port *port = netdev_priv(dev); c->rx_coalesce_usecs = port->rxqs[0]->time_coal; c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; return 0; } static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, dev_name(&dev->dev), sizeof(drvinfo->bus_info)); } static void mvpp2_ethtool_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct mvpp2_port *port = netdev_priv(dev); ring->rx_max_pending = MVPP2_MAX_RXD; ring->tx_max_pending = MVPP2_MAX_TXD; ring->rx_pending = port->rx_ring_size; ring->tx_pending = port->tx_ring_size; } static int mvpp2_ethtool_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct mvpp2_port *port = netdev_priv(dev); u16 prev_rx_ring_size = port->rx_ring_size; u16 prev_tx_ring_size = port->tx_ring_size; int err; err = mvpp2_check_ringparam_valid(dev, ring); if (err) return err; if (!netif_running(dev)) { port->rx_ring_size = ring->rx_pending; port->tx_ring_size = ring->tx_pending; return 0; } /* The interface is running, so we have to force a * reallocation of the queues */ mvpp2_stop_dev(port); mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); port->rx_ring_size = ring->rx_pending; port->tx_ring_size = ring->tx_pending; err = mvpp2_setup_rxqs(port); if (err) { /* Reallocate Rx queues with the original ring size */ port->rx_ring_size = prev_rx_ring_size; ring->rx_pending = prev_rx_ring_size; err = mvpp2_setup_rxqs(port); if (err) goto err_out; } err = mvpp2_setup_txqs(port); if 
(err) { /* Reallocate Tx queues with the original ring size */ port->tx_ring_size = prev_tx_ring_size; ring->tx_pending = prev_tx_ring_size; err = mvpp2_setup_txqs(port); if (err) goto err_clean_rxqs; } mvpp2_start_dev(port); mvpp2_egress_enable(port); mvpp2_ingress_enable(port); return 0; err_clean_rxqs: mvpp2_cleanup_rxqs(port); err_out: netdev_err(dev, "fail to change ring parameters"); return err; } /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_open = mvpp2_open, .ndo_stop = mvpp2_stop, .ndo_start_xmit = mvpp2_tx, .ndo_set_rx_mode = mvpp2_set_rx_mode, .ndo_set_mac_address = mvpp2_set_mac_address, .ndo_change_mtu = mvpp2_change_mtu, .ndo_get_stats64 = mvpp2_get_stats64, .ndo_do_ioctl = mvpp2_ioctl, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_link = ethtool_op_get_link, .get_settings = mvpp2_ethtool_get_settings, .set_settings = mvpp2_ethtool_set_settings, .set_coalesce = mvpp2_ethtool_set_coalesce, .get_coalesce = mvpp2_ethtool_get_coalesce, .get_drvinfo = mvpp2_ethtool_get_drvinfo, .get_ringparam = mvpp2_ethtool_get_ringparam, .set_ringparam = mvpp2_ethtool_set_ringparam, }; /* Driver initialization */ static void mvpp2_port_power_up(struct mvpp2_port *port) { mvpp2_port_mii_set(port); mvpp2_port_periodic_xon_disable(port); mvpp2_port_fc_adv_enable(port); mvpp2_port_reset(port); } /* Initialize port HW */ static int mvpp2_port_init(struct mvpp2_port *port) { struct device *dev = port->dev->dev.parent; struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; int queue, cpu, err; if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM) return -EINVAL; /* Disable port */ mvpp2_egress_disable(port); mvpp2_port_disable(port); port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), GFP_KERNEL); if (!port->txqs) return -ENOMEM; /* Associate physical Tx queues to this port and initialize. * The mapping is predefined. 
*/ for (queue = 0; queue < txq_number; queue++) { int queue_phy_id = mvpp2_txq_phys(port->id, queue); struct mvpp2_tx_queue *txq; txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); if (!txq) return -ENOMEM; txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); if (!txq->pcpu) { err = -ENOMEM; goto err_free_percpu; } txq->id = queue_phy_id; txq->log_id = queue; txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); txq_pcpu->cpu = cpu; } port->txqs[queue] = txq; } port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), GFP_KERNEL); if (!port->rxqs) { err = -ENOMEM; goto err_free_percpu; } /* Allocate and initialize Rx queue for this port */ for (queue = 0; queue < rxq_number; queue++) { struct mvpp2_rx_queue *rxq; /* Map physical Rx queue to port's logical Rx queue */ rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); if (!rxq) goto err_free_percpu; /* Map this Rx queue to a physical queue */ rxq->id = port->first_rxq + queue; rxq->port = port->id; rxq->logic_rxq = queue; port->rxqs[queue] = rxq; } /* Configure Rx queue group interrupt for this port */ mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number); /* Create Rx descriptor rings */ for (queue = 0; queue < rxq_number; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->size = port->rx_ring_size; rxq->pkts_coal = MVPP2_RX_COAL_PKTS; rxq->time_coal = MVPP2_RX_COAL_USEC; } mvpp2_ingress_disable(port); /* Port default configuration */ mvpp2_defaults_set(port); /* Port's classifier configuration */ mvpp2_cls_oversize_rxq_set(port); mvpp2_cls_port_config(port); /* Provide an initial Rx packet size */ port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); /* Initialize pools for swf */ err = mvpp2_swf_bm_pool_init(port); if (err) goto err_free_percpu; return 0; err_free_percpu: for (queue = 0; queue < txq_number; queue++) { if (!port->txqs[queue]) continue; free_percpu(port->txqs[queue]->pcpu); } return err; } /* Ports 
initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct device_node *port_node, struct mvpp2 *priv, int *next_first_rxq) { struct device_node *phy_node; struct mvpp2_port *port; struct net_device *dev; struct resource *res; const char *dt_mac_addr; const char *mac_from; char hw_mac_addr[ETH_ALEN]; u32 id; int features; int phy_mode; int priv_common_regs_num = 2; int err, i; dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, rxq_number); if (!dev) return -ENOMEM; phy_node = of_parse_phandle(port_node, "phy", 0); if (!phy_node) { dev_err(&pdev->dev, "missing phy\n"); err = -ENODEV; goto err_free_netdev; } phy_mode = of_get_phy_mode(port_node); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy mode\n"); err = phy_mode; goto err_free_netdev; } if (of_property_read_u32(port_node, "port-id", &id)) { err = -EINVAL; dev_err(&pdev->dev, "missing port-id value\n"); goto err_free_netdev; } dev->tx_queue_len = MVPP2_MAX_TXD; dev->watchdog_timeo = 5 * HZ; dev->netdev_ops = &mvpp2_netdev_ops; dev->ethtool_ops = &mvpp2_eth_tool_ops; port = netdev_priv(dev); port->irq = irq_of_parse_and_map(port_node, 0); if (port->irq <= 0) { err = -EINVAL; goto err_free_netdev; } if (of_property_read_bool(port_node, "marvell,loopback")) port->flags |= MVPP2_F_LOOPBACK; port->priv = priv; port->id = id; port->first_rxq = *next_first_rxq; port->phy_node = phy_node; port->phy_interface = phy_mode; res = platform_get_resource(pdev, IORESOURCE_MEM, priv_common_regs_num + id); port->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(port->base)) { err = PTR_ERR(port->base); goto err_free_irq; } /* Alloc per-cpu stats */ port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); if (!port->stats) { err = -ENOMEM; goto err_free_irq; } dt_mac_addr = of_get_mac_address(port_node); if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { mac_from = "device tree"; ether_addr_copy(dev->dev_addr, dt_mac_addr); } else { mvpp2_get_mac_address(port, 
hw_mac_addr); if (is_valid_ether_addr(hw_mac_addr)) { mac_from = "hardware"; ether_addr_copy(dev->dev_addr, hw_mac_addr); } else { mac_from = "random"; eth_hw_addr_random(dev); } } port->tx_ring_size = MVPP2_MAX_TXD; port->rx_ring_size = MVPP2_MAX_RXD; port->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); err = mvpp2_port_init(port); if (err < 0) { dev_err(&pdev->dev, "failed to init port %d\n", id); goto err_free_stats; } mvpp2_port_power_up(port); netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); features = NETIF_F_SG | NETIF_F_IP_CSUM; dev->features = features | NETIF_F_RXCSUM; dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; dev->vlan_features |= features; err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register netdev\n"); goto err_free_txq_pcpu; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); /* Increment the first Rx queue number to be used by the next port */ *next_first_rxq += rxq_number; priv->port_list[id] = port; return 0; err_free_txq_pcpu: for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); err_free_stats: free_percpu(port->stats); err_free_irq: irq_dispose_mapping(port->irq); err_free_netdev: free_netdev(dev); return err; } /* Ports removal routine */ static void mvpp2_port_remove(struct mvpp2_port *port) { int i; unregister_netdev(port->dev); free_percpu(port->stats); for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); irq_dispose_mapping(port->irq); free_netdev(port->dev); } /* Initialize decoding windows */ static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, struct mvpp2 *priv) { u32 win_enable; int i; for (i = 0; i < 6; i++) { mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); if (i < 4) mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); } win_enable = 0; for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; mvpp2_write(priv, MVPP2_WIN_BASE(i), (cs->base & 
0xffff0000) | (cs->mbus_attr << 8) | dram->mbus_dram_target_id); mvpp2_write(priv, MVPP2_WIN_SIZE(i), (cs->size - 1) & 0xffff0000); win_enable |= (1 << i); } mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); } /* Initialize Rx FIFO's */ static void mvpp2_rx_fifo_init(struct mvpp2 *priv) { int port; for (port = 0; port < MVPP2_MAX_PORTS; port++) { mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), MVPP2_RX_FIFO_PORT_DATA_SIZE); mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), MVPP2_RX_FIFO_PORT_ATTR_SIZE); } mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT); mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } /* Initialize network controller common part HW */ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) { const struct mbus_dram_target_info *dram_target_info; int err, i; u32 val; /* Checks for hardware constraints */ if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) { dev_err(&pdev->dev, "invalid queue size parameter\n"); return -EINVAL; } /* MBUS windows configuration */ dram_target_info = mv_mbus_dram_info(); if (dram_target_info) mvpp2_conf_mbus_windows(dram_target_info, priv); /* Disable HW PHY polling */ val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); val |= MVPP2_PHY_AN_STOP_SMI0_MASK; writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); /* Allocate and initialize aggregated TXQs */ priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), sizeof(struct mvpp2_tx_queue), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM; for_each_present_cpu(i) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], MVPP2_AGGR_TXQ_SIZE, i, priv); if (err < 0) return err; } /* Rx Fifo Init */ mvpp2_rx_fifo_init(priv); /* Reset Rx queue group interrupt configuration */ for (i = 0; i < MVPP2_MAX_PORTS; i++) mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number); writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 
priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); /* Allow cache snoop when transmiting packets */ mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); /* Buffer Manager initialization */ err = mvpp2_bm_init(pdev, priv); if (err < 0) return err; /* Parser default initialization */ err = mvpp2_prs_default_init(pdev, priv); if (err < 0) return err; /* Classifier default initialization */ mvpp2_cls_init(priv); return 0; } static int mvpp2_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; struct device_node *port_node; struct mvpp2 *priv; struct resource *res; int port_count, first_rxq; int err; priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL); if (!priv) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); priv->lms_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->lms_base)) return PTR_ERR(priv->lms_base); priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); if (IS_ERR(priv->pp_clk)) return PTR_ERR(priv->pp_clk); err = clk_prepare_enable(priv->pp_clk); if (err < 0) return err; priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); if (IS_ERR(priv->gop_clk)) { err = PTR_ERR(priv->gop_clk); goto err_pp_clk; } err = clk_prepare_enable(priv->gop_clk); if (err < 0) goto err_pp_clk; /* Get system's tclk rate */ priv->tclk = clk_get_rate(priv->pp_clk); /* Initialize network controller */ err = mvpp2_init(pdev, priv); if (err < 0) { dev_err(&pdev->dev, "failed to initialize controller\n"); goto err_gop_clk; } port_count = of_get_available_child_count(dn); if (port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; goto err_gop_clk; } priv->port_list = devm_kcalloc(&pdev->dev, port_count, sizeof(struct mvpp2_port *), GFP_KERNEL); if (!priv->port_list) { err = -ENOMEM; goto err_gop_clk; } /* Initialize ports */ first_rxq = 0; 
for_each_available_child_of_node(dn, port_node) { err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq); if (err < 0) goto err_gop_clk; } platform_set_drvdata(pdev, priv); return 0; err_gop_clk: clk_disable_unprepare(priv->gop_clk); err_pp_clk: clk_disable_unprepare(priv->pp_clk); return err; } static int mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); struct device_node *dn = pdev->dev.of_node; struct device_node *port_node; int i = 0; for_each_available_child_of_node(dn, port_node) { if (priv->port_list[i]) mvpp2_port_remove(priv->port_list[i]); i++; } for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; mvpp2_bm_pool_destroy(pdev, priv, bm_pool); } for_each_present_cpu(i) { struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; dma_free_coherent(&pdev->dev, MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, aggr_txq->descs, aggr_txq->descs_phys); } clk_disable_unprepare(priv->pp_clk); clk_disable_unprepare(priv->gop_clk); return 0; } static const struct of_device_id mvpp2_match[] = { { .compatible = "marvell,armada-375-pp2" }, { } }; MODULE_DEVICE_TABLE(of, mvpp2_match); static struct platform_driver mvpp2_driver = { .probe = mvpp2_probe, .remove = mvpp2_remove, .driver = { .name = MVPP2_DRIVER_NAME, .of_match_table = mvpp2_match, }, }; module_platform_driver(mvpp2_driver); MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
Altaf-Mahdi/android_kernel_oneplus_msm8994
drivers/acpi/scan.c
828
54502
/* * scan.c - support for transforming the ACPI namespace into individual objects */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/signal.h> #include <linux/kthread.h> #include <linux/dmi.h> #include <linux/nls.h> #include <acpi/acpi_drivers.h> #include "internal.h" #define _COMPONENT ACPI_BUS_COMPONENT ACPI_MODULE_NAME("scan"); #define STRUCT_TO_INT(s) (*((int*)&s)) extern struct acpi_device *acpi_root; #define ACPI_BUS_CLASS "system_bus" #define ACPI_BUS_HID "LNXSYBUS" #define ACPI_BUS_DEVICE_NAME "System Bus" #define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent) static const char *dummy_hid = "device"; static LIST_HEAD(acpi_device_list); static LIST_HEAD(acpi_bus_id_list); static DEFINE_MUTEX(acpi_scan_lock); static LIST_HEAD(acpi_scan_handlers_list); DEFINE_MUTEX(acpi_device_lock); LIST_HEAD(acpi_wakeup_device_list); struct acpi_device_bus_id{ char bus_id[15]; unsigned int instance_no; struct list_head node; }; void acpi_scan_lock_acquire(void) { mutex_lock(&acpi_scan_lock); } EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire); void acpi_scan_lock_release(void) { mutex_unlock(&acpi_scan_lock); } EXPORT_SYMBOL_GPL(acpi_scan_lock_release); int acpi_scan_add_handler(struct acpi_scan_handler *handler) { if (!handler || !handler->attach) return -EINVAL; list_add_tail(&handler->list_node, &acpi_scan_handlers_list); return 0; } int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler, const char *hotplug_profile_name) { int error; error = acpi_scan_add_handler(handler); if (error) return error; acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name); return 0; } /* * Creates hid/cid(s) string needed for modalias and uevent * e.g. 
on a device with hid:IBM0001 and cid:ACPI0001 you get: * char *modalias: "acpi:IBM0001:ACPI0001" */ static int create_modalias(struct acpi_device *acpi_dev, char *modalias, int size) { int len; int count; struct acpi_hardware_id *id; if (list_empty(&acpi_dev->pnp.ids)) return 0; len = snprintf(modalias, size, "acpi:"); size -= len; list_for_each_entry(id, &acpi_dev->pnp.ids, list) { count = snprintf(&modalias[len], size, "%s:", id->id); if (count < 0 || count >= size) return -EINVAL; len += count; size -= count; } modalias[len] = '\0'; return len; } static ssize_t acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); int len; /* Device has no HID and no CID or string is >1024 */ len = create_modalias(acpi_dev, buf, 1024); if (len <= 0) return 0; buf[len++] = '\n'; return len; } static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL); static int acpi_scan_hot_remove(struct acpi_device *device) { acpi_handle handle = device->handle; acpi_handle not_used; struct acpi_object_list arg_list; union acpi_object arg; acpi_status status; unsigned long long sta; /* If there is no handle, the device node has been unregistered. */ if (!handle) { dev_dbg(&device->dev, "ACPI handle missing\n"); put_device(&device->dev); return -EINVAL; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Hot-removing device %s...\n", dev_name(&device->dev))); acpi_bus_trim(device); /* Device node has been unregistered. */ put_device(&device->dev); device = NULL; if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &not_used))) { arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = 0; acpi_evaluate_object(handle, "_LCK", &arg_list, NULL); } arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; arg.integer.value = 1; /* * TBD: _EJD support. 
*/ status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { return -ENODEV; } else { acpi_handle_warn(handle, "Eject failed (0x%x)\n", status); return -EIO; } } /* * Verify if eject was indeed successful. If not, log an error * message. No need to call _OST since _EJ0 call was made OK. */ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status)) { acpi_handle_warn(handle, "Status check after eject failed (0x%x)\n", status); } else if (sta & ACPI_STA_DEVICE_ENABLED) { acpi_handle_warn(handle, "Eject incomplete - status 0x%llx\n", sta); } return 0; } static void acpi_bus_device_eject(void *context) { acpi_handle handle = context; struct acpi_device *device = NULL; struct acpi_scan_handler *handler; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; mutex_lock(&acpi_scan_lock); acpi_bus_get_device(handle, &device); if (!device) goto err_out; handler = device->handler; if (!handler || !handler->hotplug.enabled) { ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; goto err_out; } acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); if (handler->hotplug.mode == AHM_CONTAINER) { device->flags.eject_pending = true; kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE); } else { int error; get_device(&device->dev); error = acpi_scan_hot_remove(device); if (error) goto err_out; } out: mutex_unlock(&acpi_scan_lock); return; err_out: acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code, NULL); goto out; } static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source) { struct acpi_device *device = NULL; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; int error; mutex_lock(&acpi_scan_lock); if (ost_source != ACPI_NOTIFY_BUS_CHECK) { acpi_bus_get_device(handle, &device); if (device) { dev_warn(&device->dev, "Attempt to re-insert\n"); goto out; } } error = acpi_bus_scan(handle); if (error) { acpi_handle_warn(handle, "Namespace 
scan failure\n"); goto out; } error = acpi_bus_get_device(handle, &device); if (error) { acpi_handle_warn(handle, "Missing device node object\n"); goto out; } ost_code = ACPI_OST_SC_SUCCESS; if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER) kobject_uevent(&device->dev.kobj, KOBJ_ONLINE); out: acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL); mutex_unlock(&acpi_scan_lock); } static void acpi_scan_bus_check(void *context) { acpi_scan_bus_device_check((acpi_handle)context, ACPI_NOTIFY_BUS_CHECK); } static void acpi_scan_device_check(void *context) { acpi_scan_bus_device_check((acpi_handle)context, ACPI_NOTIFY_DEVICE_CHECK); } static void acpi_hotplug_unsupported(acpi_handle handle, u32 type) { u32 ost_status; switch (type) { case ACPI_NOTIFY_BUS_CHECK: acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event: unsupported\n"); ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED; break; case ACPI_NOTIFY_DEVICE_CHECK: acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event: unsupported\n"); ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED; break; case ACPI_NOTIFY_EJECT_REQUEST: acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event: unsupported\n"); ost_status = ACPI_OST_SC_EJECT_NOT_SUPPORTED; break; default: /* non-hotplug event; possibly handled by other handler */ return; } acpi_evaluate_hotplug_ost(handle, type, ost_status, NULL); } static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data) { acpi_osd_exec_callback callback; struct acpi_scan_handler *handler = data; acpi_status status; if (!handler->hotplug.enabled) return acpi_hotplug_unsupported(handle, type); switch (type) { case ACPI_NOTIFY_BUS_CHECK: acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n"); callback = acpi_scan_bus_check; break; case ACPI_NOTIFY_DEVICE_CHECK: acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n"); callback = acpi_scan_device_check; break; case ACPI_NOTIFY_EJECT_REQUEST: acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST 
event\n"); callback = acpi_bus_device_eject; break; default: /* non-hotplug event; possibly handled by other handler */ return; } status = acpi_os_hotplug_execute(callback, handle); if (ACPI_FAILURE(status)) acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); } /** * acpi_bus_hot_remove_device: hot-remove a device and its children * @context: struct acpi_eject_event pointer (freed in this func) * * Hot-remove a device and its children. This function frees up the * memory space passed by arg context, so that the caller may call * this function asynchronously through acpi_os_hotplug_execute(). */ void acpi_bus_hot_remove_device(void *context) { struct acpi_eject_event *ej_event = context; struct acpi_device *device = ej_event->device; acpi_handle handle = device->handle; int error; mutex_lock(&acpi_scan_lock); error = acpi_scan_hot_remove(device); if (error && handle) acpi_evaluate_hotplug_ost(handle, ej_event->event, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); mutex_unlock(&acpi_scan_lock); kfree(context); } EXPORT_SYMBOL(acpi_bus_hot_remove_device); static ssize_t real_power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); int state; int ret; ret = acpi_device_get_power(adev, &state); if (ret) return ret; return sprintf(buf, "%s\n", acpi_power_state_string(state)); } static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL); static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state)); } static DEVICE_ATTR(power_state, 0444, power_state_show, NULL); static ssize_t acpi_eject_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi_device = to_acpi_device(d); struct acpi_eject_event *ej_event; acpi_object_type not_used; acpi_status status; u32 
ost_source; int ret; if (!count || buf[0] != '1') return -EINVAL; if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) && !acpi_device->driver) return -ENODEV; status = acpi_get_type(acpi_device->handle, &not_used); if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) return -ENODEV; mutex_lock(&acpi_scan_lock); if (acpi_device->flags.eject_pending) { /* ACPI eject notification event. */ ost_source = ACPI_NOTIFY_EJECT_REQUEST; acpi_device->flags.eject_pending = 0; } else { /* Eject initiated by user space. */ ost_source = ACPI_OST_EC_OSPM_EJECT; } ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); if (!ej_event) { ret = -ENOMEM; goto err_out; } acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); ej_event->device = acpi_device; ej_event->event = ost_source; get_device(&acpi_device->dev); status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event); if (ACPI_FAILURE(status)) { put_device(&acpi_device->dev); kfree(ej_event); ret = status == AE_NO_MEMORY ? 
-ENOMEM : -EAGAIN; goto err_out; } ret = count; out: mutex_unlock(&acpi_scan_lock); return ret; err_out: acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); goto out; } static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store); static ssize_t acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); } static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); static ssize_t acpi_device_uid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); } static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); static ssize_t acpi_device_adr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "0x%08x\n", (unsigned int)(acpi_dev->pnp.bus_address)); } static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); static ssize_t acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; int result; result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path); if (result) goto end; result = sprintf(buf, "%s\n", (char*)path.pointer); kfree(path.pointer); end: return result; } static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); /* sysfs file that shows description text from the ACPI _STR method */ static ssize_t description_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); int result; if (acpi_dev->pnp.str_obj == NULL) return 0; /* * The _STR object contains a Unicode identifier for a device. * We need to convert to utf-8 so it can be displayed. 
*/ result = utf16s_to_utf8s( (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, acpi_dev->pnp.str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE); buf[result++] = '\n'; return result; } static DEVICE_ATTR(description, 0444, description_show, NULL); static ssize_t acpi_device_sun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%lu\n", acpi_dev->pnp.sun); } static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); static int acpi_device_setup_files(struct acpi_device *dev) { struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; acpi_handle temp; unsigned long long sun; int result = 0; /* * Devices gotten from FADT don't have a "path" attribute */ if (dev->handle) { result = device_create_file(&dev->dev, &dev_attr_path); if (result) goto end; } if (!list_empty(&dev->pnp.ids)) { result = device_create_file(&dev->dev, &dev_attr_hid); if (result) goto end; result = device_create_file(&dev->dev, &dev_attr_modalias); if (result) goto end; } /* * If device has _STR, 'description' file is created */ status = acpi_get_handle(dev->handle, "_STR", &temp); if (ACPI_SUCCESS(status)) { status = acpi_evaluate_object(dev->handle, "_STR", NULL, &buffer); if (ACPI_FAILURE(status)) buffer.pointer = NULL; dev->pnp.str_obj = buffer.pointer; result = device_create_file(&dev->dev, &dev_attr_description); if (result) goto end; } if (dev->pnp.type.bus_address) result = device_create_file(&dev->dev, &dev_attr_adr); if (dev->pnp.unique_id) result = device_create_file(&dev->dev, &dev_attr_uid); status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun); if (ACPI_SUCCESS(status)) { dev->pnp.sun = (unsigned long)sun; result = device_create_file(&dev->dev, &dev_attr_sun); if (result) goto end; } else { dev->pnp.sun = (unsigned long)-1; } /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. 
*/ status = acpi_get_handle(dev->handle, "_EJ0", &temp); if (ACPI_SUCCESS(status)) { result = device_create_file(&dev->dev, &dev_attr_eject); if (result) return result; } if (dev->flags.power_manageable) { result = device_create_file(&dev->dev, &dev_attr_power_state); if (result) return result; if (dev->power.flags.power_resources) result = device_create_file(&dev->dev, &dev_attr_real_power_state); } end: return result; } static void acpi_device_remove_files(struct acpi_device *dev) { acpi_status status; acpi_handle temp; if (dev->flags.power_manageable) { device_remove_file(&dev->dev, &dev_attr_power_state); if (dev->power.flags.power_resources) device_remove_file(&dev->dev, &dev_attr_real_power_state); } /* * If device has _STR, remove 'description' file */ status = acpi_get_handle(dev->handle, "_STR", &temp); if (ACPI_SUCCESS(status)) { kfree(dev->pnp.str_obj); device_remove_file(&dev->dev, &dev_attr_description); } /* * If device has _EJ0, remove 'eject' file. */ status = acpi_get_handle(dev->handle, "_EJ0", &temp); if (ACPI_SUCCESS(status)) device_remove_file(&dev->dev, &dev_attr_eject); status = acpi_get_handle(dev->handle, "_SUN", &temp); if (ACPI_SUCCESS(status)) device_remove_file(&dev->dev, &dev_attr_sun); if (dev->pnp.unique_id) device_remove_file(&dev->dev, &dev_attr_uid); if (dev->pnp.type.bus_address) device_remove_file(&dev->dev, &dev_attr_adr); device_remove_file(&dev->dev, &dev_attr_modalias); device_remove_file(&dev->dev, &dev_attr_hid); if (dev->handle) device_remove_file(&dev->dev, &dev_attr_path); } /* -------------------------------------------------------------------------- ACPI Bus operations -------------------------------------------------------------------------- */ static const struct acpi_device_id *__acpi_match_device( struct acpi_device *device, const struct acpi_device_id *ids) { const struct acpi_device_id *id; struct acpi_hardware_id *hwid; /* * If the device is not present, it is unnecessary to load device * driver for it. 
 */
	if (!device->status.present)
		return NULL;

	/* Match every hardware ID of the device against the driver table. */
	for (id = ids; id->id[0]; id++)
		list_for_each_entry(hwid, &device->pnp.ids, list)
			if (!strcmp((char *) id->id, hwid->id))
				return id;

	return NULL;
}

/**
 * acpi_match_device - Match a struct device against a given list of ACPI IDs
 * @ids: Array of struct acpi_device_id object to match against.
 * @dev: The device structure to match.
 *
 * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
 * object for that handle and use that object to match against a given list of
 * device IDs.
 *
 * Return a pointer to the first matching ID on success or %NULL on failure.
 */
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
					       const struct device *dev)
{
	struct acpi_device *adev;
	acpi_handle handle = ACPI_HANDLE(dev);

	if (!ids || !handle || acpi_bus_get_device(handle, &adev))
		return NULL;

	return __acpi_match_device(adev, ids);
}
EXPORT_SYMBOL_GPL(acpi_match_device);

/* Returns 0 if @device matches one of @ids, -ENOENT otherwise. */
int acpi_match_device_ids(struct acpi_device *device,
			  const struct acpi_device_id *ids)
{
	return __acpi_match_device(device, ids) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);

/*
 * Free the wakeup power-resource list and, if the device is power
 * manageable, the per-D-state resource lists (D0..D3hot).
 */
static void acpi_free_power_resources_lists(struct acpi_device *device)
{
	int i;

	if (device->wakeup.flags.valid)
		acpi_power_resources_list_free(&device->wakeup.resources);

	if (!device->flags.power_manageable)
		return;

	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
		struct acpi_device_power_state *ps = &device->power.states[i];
		acpi_power_resources_list_free(&ps->resources);
	}
}

/* Final release callback for the embedded struct device. */
static void acpi_device_release(struct device *dev)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);

	acpi_free_pnp_ids(&acpi_dev->pnp);
	acpi_free_power_resources_lists(acpi_dev);
	kfree(acpi_dev);
}

/*
 * Bus-type match callback: a driver matches only after enumeration has
 * set match_driver and one of the device's HIDs/CIDs is in the driver's
 * ID table.
 */
static int acpi_bus_match(struct device *dev, struct device_driver *drv)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_driver *acpi_drv = to_acpi_driver(drv);

	return acpi_dev->flags.match_driver
		&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}

/* Bus-type uevent callback: append MODALIAS=<acpi modalias> to the env. */
static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	int len;

	/* Fall back silently to modules built for all devices */
	if (list_empty(&acpi_dev->pnp.ids))
		return 0;

	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;
	len = create_modalias(acpi_dev, &env->buf[env->buflen - 1],
			      sizeof(env->buf) - env->buflen);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += len;
	return 0;
}

/* ACPICA notify callback: forward the event to the bound driver. */
static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *device = data;

	device->driver->ops.notify(device, event);
}

/* Deferred-work wrapper used for fixed-feature (handle-less) devices. */
static void acpi_device_notify_fixed(void *data)
{
	struct acpi_device *device = data;

	/* Fixed hardware devices have no handles */
	acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
}

/*
 * Fixed-event handler: runs in interrupt-ish ACPICA context, so punt the
 * driver notification to acpi_os_execute().
 */
static acpi_status acpi_device_fixed_event(void *data)
{
	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
	return AE_OK;
}

/*
 * Install the appropriate notify source for the device: fixed power/sleep
 * button events for the two fixed-feature device types, a regular
 * ACPI_DEVICE_NOTIFY handler otherwise.  Returns 0 or -EINVAL.
 */
static int acpi_device_install_notify_handler(struct acpi_device *device)
{
	acpi_status status;

	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
		status =
		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						     acpi_device_fixed_event,
						     device);
	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
		status =
		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
						     acpi_device_fixed_event,
						     device);
	else
		status = acpi_install_notify_handler(device->handle,
						     ACPI_DEVICE_NOTIFY,
						     acpi_device_notify,
						     device);

	if (ACPI_FAILURE(status))
		return -EINVAL;
	return 0;
}

/* Mirror of acpi_device_install_notify_handler() for teardown. */
static void acpi_device_remove_notify_handler(struct acpi_device *device)
{
	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						acpi_device_fixed_event);
	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
						acpi_device_fixed_event);
	else
		acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
					   acpi_device_notify);
}

static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);

/*
 * Bus-type probe callback: bind the driver (->ops.add), then install the
 * notify handler if the driver wants notifications.  On notify-install
 * failure the binding is rolled back before returning the error.
 */
static int acpi_device_probe(struct device * dev)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
	int ret;

	ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
	if (!ret) {
		if (acpi_drv->ops.notify) {
			ret = acpi_device_install_notify_handler(acpi_dev);
			if (ret) {
				/* Undo the ->ops.add() binding */
				if (acpi_drv->ops.remove)
					acpi_drv->ops.remove(acpi_dev);
				acpi_dev->driver = NULL;
				acpi_dev->driver_data = NULL;
				return ret;
			}
		}

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Found driver [%s] for device [%s]\n",
				  acpi_drv->name, acpi_dev->pnp.bus_id));
		/* Balanced by put_device() in acpi_device_remove() */
		get_device(dev);
	}

	return ret;
}

/* Bus-type remove callback: unwind everything acpi_device_probe() did. */
static int acpi_device_remove(struct device * dev)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_driver *acpi_drv = acpi_dev->driver;

	if (acpi_drv) {
		if (acpi_drv->ops.notify)
			acpi_device_remove_notify_handler(acpi_dev);
		if (acpi_drv->ops.remove)
			acpi_drv->ops.remove(acpi_dev);
	}
	acpi_dev->driver = NULL;
	acpi_dev->driver_data = NULL;

	put_device(dev);
	return 0;
}

struct bus_type acpi_bus_type = {
	.name		= "acpi",
	.match		= acpi_bus_match,
	.probe		= acpi_device_probe,
	.remove		= acpi_device_remove,
	.uevent		= acpi_device_uevent,
};

/*
 * acpi_device_add - register an enumerated ACPI device node with the
 * driver core.
 *
 * Attaches the struct acpi_device to its ACPICA handle, assigns a unique
 * "<HID>:<instance>" device name, links the node into the parent/sibling
 * and wakeup lists (under acpi_device_lock), then calls device_add().
 * On failure the linkage and attached data are unwound via the err labels.
 * Returns 0 on success or a negative errno.
 */
int acpi_device_add(struct acpi_device *device,
		    void (*release)(struct device *))
{
	int result;
	struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
	int found = 0;

	if (device->handle) {
		acpi_status status;

		status = acpi_attach_data(device->handle, acpi_bus_data_handler,
					  device);
		if (ACPI_FAILURE(status)) {
			acpi_handle_err(device->handle,
					"Unable to attach device data\n");
			return -ENODEV;
		}
	}

	/*
	 * Linkage
	 * -------
	 * Link this device to its parent and siblings.
	 */
	INIT_LIST_HEAD(&device->children);
	INIT_LIST_HEAD(&device->node);
	INIT_LIST_HEAD(&device->wakeup_list);
	INIT_LIST_HEAD(&device->physical_node_list);
	mutex_init(&device->physical_node_lock);
	INIT_LIST_HEAD(&device->power_dependent);

	/* Allocated up front so the search below can run under the lock. */
	new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
	if (!new_bus_id) {
		pr_err(PREFIX "Memory allocation error\n");
		result = -ENOMEM;
		goto err_detach;
	}

	mutex_lock(&acpi_device_lock);
	/*
	 * Find suitable bus_id and instance number in acpi_bus_id_list
	 * If failed, create one and link it into acpi_bus_id_list
	 */
	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
		if (!strcmp(acpi_device_bus_id->bus_id,
			    acpi_device_hid(device))) {
			acpi_device_bus_id->instance_no++;
			found = 1;
			kfree(new_bus_id);
			break;
		}
	}
	if (!found) {
		acpi_device_bus_id = new_bus_id;
		strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
		acpi_device_bus_id->instance_no = 0;
		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
	}
	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id,
		     acpi_device_bus_id->instance_no);

	if (device->parent)
		list_add_tail(&device->node, &device->parent->children);

	if (device->wakeup.flags.valid)
		list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
	mutex_unlock(&acpi_device_lock);

	if (device->parent)
		device->dev.parent = &device->parent->dev;
	device->dev.bus = &acpi_bus_type;
	device->dev.release = release;
	result = device_add(&device->dev);
	if (result) {
		dev_err(&device->dev, "Error registering device\n");
		goto err;
	}

	/* sysfs failure is logged but not fatal for registration */
	result = acpi_device_setup_files(device);
	if (result)
		printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
		       dev_name(&device->dev));

	device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
	return 0;

 err:
	mutex_lock(&acpi_device_lock);
	if (device->parent)
		list_del(&device->node);
	list_del(&device->wakeup_list);
	mutex_unlock(&acpi_device_lock);

 err_detach:
	acpi_detach_data(device->handle, acpi_bus_data_handler);
	return result;
}

/*
 * acpi_device_unregister - reverse of acpi_device_add(): unlink, detach
 * ACPICA data, remove sysfs files and drop the final reference.
 */
static void acpi_device_unregister(struct acpi_device *device)
{
	mutex_lock(&acpi_device_lock);
	if (device->parent)
		list_del(&device->node);

	list_del(&device->wakeup_list);
	mutex_unlock(&acpi_device_lock);

	acpi_detach_data(device->handle, acpi_bus_data_handler);

	acpi_power_add_remove_device(device, false);
	acpi_device_remove_files(device);
	if (device->remove)
		device->remove(device);

	device_del(&device->dev);
	/*
	 * Transition the device to D3cold to drop the reference counts of all
	 * power resources the device depends on and turn off the ones that have
	 * no more references.
	 */
	acpi_device_set_power(device, ACPI_STATE_D3_COLD);
	device->handle = NULL;
	put_device(&device->dev);
}

/* --------------------------------------------------------------------------
                                 Driver Management
   -------------------------------------------------------------------------- */
/**
 * acpi_bus_driver_init - add a device to a driver
 * @device: the device to add and initialize
 * @driver: driver for the device
 *
 * Used to initialize a device via its device driver.  Called whenever a
 * driver is bound to a device.  Invokes the driver's add() ops.
 */
static int
acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
{
	int result = 0;

	if (!device || !driver)
		return -EINVAL;

	if (!driver->ops.add)
		return -ENOSYS;

	result = driver->ops.add(device);
	if (result)
		return result;

	device->driver = driver;

	/*
	 * TBD - Configuration Management: Assign resources to device based
	 * upon possible configuration and currently allocated resources.
	 */

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Driver successfully bound to device\n"));
	return 0;
}

/**
 * acpi_bus_register_driver - register a driver with the ACPI bus
 * @driver: driver being registered
 *
 * Registers a driver with the ACPI bus.  Searches the namespace for all
 * devices that match the driver's criteria and binds.  Returns zero for
 * success or a negative error status for failure.
 */
int acpi_bus_register_driver(struct acpi_driver *driver)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;
	driver->drv.name = driver->name;
	driver->drv.bus = &acpi_bus_type;
	driver->drv.owner = driver->owner;

	ret = driver_register(&driver->drv);
	return ret;
}
EXPORT_SYMBOL(acpi_bus_register_driver);

/**
 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
 * @driver: driver to unregister
 *
 * Unregisters a driver with the ACPI bus.  Searches the namespace for all
 * devices that match the driver's criteria and unbinds.
 */
void acpi_bus_unregister_driver(struct acpi_driver *driver)
{
	driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_unregister_driver);

/* --------------------------------------------------------------------------
                                 Device Enumeration
   -------------------------------------------------------------------------- */
/*
 * Walk up the namespace from @handle to the closest ancestor that already
 * has a struct acpi_device.  Returns acpi_root for handle-less (fixed
 * hardware) devices and when the walk leaves the namespace, or NULL only
 * for AE_NULL_ENTRY.
 */
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
{
	struct acpi_device *device = NULL;
	acpi_status status;

	/*
	 * Fixed hardware devices do not appear in the namespace and do not
	 * have handles, but we fabricate acpi_devices for them, so we have
	 * to deal with them specially.
	 */
	if (!handle)
		return acpi_root;

	do {
		status = acpi_get_parent(handle, &handle);
		if (ACPI_FAILURE(status))
			return status == AE_NULL_ENTRY ? NULL : acpi_root;
	} while (acpi_bus_get_device(handle, &device));

	return device;
}

/*
 * Evaluate _EJD (ejection-dependent device) for @handle and resolve the
 * returned namestring to a handle in *@ejd.
 */
acpi_status
acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
{
	acpi_status status;
	acpi_handle tmp;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *obj;

	status = acpi_get_handle(handle, "_EJD", &tmp);
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
	if (ACPI_SUCCESS(status)) {
		/* _EJD is expected to return a string object (a namepath) */
		obj = buffer.pointer;
		status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer,
					 ejd);
		kfree(buffer.pointer);
	}
	return status;
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);

/* ACPICA attached-data handler; no action needed on detach. */
void acpi_bus_data_handler(acpi_handle handle, void *context)
{
	/* TBD */
	return;
}

/*
 * Evaluate _PRW for @handle and fill in @wakeup: GPE device/number,
 * deepest wake-capable sleep state and the list of wakeup power
 * resources.  Returns 0 on success or a negative errno.
 */
static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
					struct acpi_device_wakeup *wakeup)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package = NULL;
	union acpi_object *element = NULL;
	acpi_status status;
	int err = -ENODATA;

	if (!wakeup)
		return -EINVAL;

	INIT_LIST_HEAD(&wakeup->resources);

	/* _PRW */
	status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
		return err;
	}

	package = (union acpi_object *)buffer.pointer;

	if (!package || package->package.count < 2)
		goto out;

	/*
	 * Element 0 is either (GPE block device ref, GPE index) or a plain
	 * integer GPE index on the FADT GPE blocks.
	 */
	element = &(package->package.elements[0]);
	if (!element)
		goto out;

	if (element->type == ACPI_TYPE_PACKAGE) {
		if ((element->package.count < 2) ||
		    (element->package.elements[0].type !=
		     ACPI_TYPE_LOCAL_REFERENCE)
		    || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
			goto out;

		wakeup->gpe_device =
		    element->package.elements[0].reference.handle;
		wakeup->gpe_number =
		    (u32) element->package.elements[1].integer.value;
	} else if (element->type == ACPI_TYPE_INTEGER) {
		wakeup->gpe_device = NULL;
		wakeup->gpe_number =
		element->integer.value;
	} else {
		goto out;
	}

	/* Element 1 is the deepest sleep state the device can wake from. */
	element = &(package->package.elements[1]);
	if (element->type != ACPI_TYPE_INTEGER)
		goto out;

	wakeup->sleep_state = element->integer.value;

	/* Remaining elements (index 2 onward) reference power resources. */
	err = acpi_extract_power_resources(package, 2, &wakeup->resources);
	if (err)
		goto out;

	if (!list_empty(&wakeup->resources)) {
		int sleep_state;

		err = acpi_power_wakeup_list_init(&wakeup->resources,
						  &sleep_state);
		if (err) {
			acpi_handle_warn(handle, "Retrieving current states "
					 "of wakeup power resources failed\n");
			acpi_power_resources_list_free(&wakeup->resources);
			goto out;
		}
		if (sleep_state < wakeup->sleep_state) {
			acpi_handle_warn(handle, "Overriding _PRW sleep state "
					 "(S%d) by S%d from power resources\n",
					 (int)wakeup->sleep_state, sleep_state);
			wakeup->sleep_state = sleep_state;
		}
	}
	acpi_setup_gpe_for_wake(handle, wakeup->gpe_device,
				wakeup->gpe_number);

 out:
	kfree(buffer.pointer);
	return err;
}

/*
 * Decide whether the device can wake the system at runtime (run_wake).
 * Power/lid/sleep buttons always can; other devices can if their wake
 * GPE has a handler installed.
 */
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
{
	struct acpi_device_id button_device_ids[] = {
		{"PNP0C0C", 0},		/* power button */
		{"PNP0C0D", 0},		/* lid */
		{"PNP0C0E", 0},		/* sleep button */
		{"", 0},
	};
	acpi_status status;
	acpi_event_status event_status;

	device->wakeup.flags.notifier_present = 0;

	/* Power button, Lid switch always enable wakeup */
	if (!acpi_match_device_ids(device, button_device_ids)) {
		device->wakeup.flags.run_wake = 1;
		/* &button_device_ids[1] skips the power button entry */
		if (!acpi_match_device_ids(device, &button_device_ids[1])) {
			/* Do not use Lid/sleep button for S5 wakeup */
			if (device->wakeup.sleep_state == ACPI_STATE_S5)
				device->wakeup.sleep_state = ACPI_STATE_S4;
		}
		device_set_wakeup_capable(&device->dev, true);
		return;
	}

	status = acpi_get_gpe_status(device->wakeup.gpe_device,
				     device->wakeup.gpe_number,
				     &event_status);
	if (status == AE_OK)
		device->wakeup.flags.run_wake =
		    !!(event_status & ACPI_EVENT_FLAG_HANDLE);
}

/*
 * Populate device->wakeup from _PRW (if present) and disarm the device's
 * wake capability via _DSW/_PSW until a driver requests it.
 */
static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
	acpi_handle temp;
	acpi_status status = 0;
	int err;

	/* Presence of _PRW indicates wake capable */
	status = acpi_get_handle(device->handle, "_PRW", &temp);
	if (ACPI_FAILURE(status))
		return;

	err = acpi_bus_extract_wakeup_device_power_package(device->handle,
							   &device->wakeup);
	if (err) {
		dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
		return;
	}

	device->wakeup.flags.valid = 1;
	device->wakeup.prepare_count = 0;
	acpi_bus_set_run_wake_flags(device);
	/* Call _PSW/_DSW object to disable its ability to wake the sleeping
	 * system for the ACPI device with the _PRW object.
	 * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
	 * So it is necessary to call _DSW object first. Only when it is not
	 * present will the _PSW object used.
	 */
	err = acpi_device_sleep_wake(device, 0, 0, 0);
	if (err)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"error in _DSW or _PSW evaluation\n"));
}

/*
 * Initialize one D-state entry of device->power.states: collect the _PRx
 * power-resource references and check for an explicit _PSx method.
 */
static void acpi_bus_init_power_state(struct acpi_device *device, int state)
{
	struct acpi_device_power_state *ps = &device->power.states[state];
	/* Builds "_PR0".."_PR3"; patched to "_PSx" below. */
	char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_handle handle;
	acpi_status status;

	INIT_LIST_HEAD(&ps->resources);

	/* Evaluate "_PRx" to get referenced power resources */
	status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
	if (ACPI_SUCCESS(status)) {
		union acpi_object *package = buffer.pointer;

		if (buffer.length && package
		    && package->type == ACPI_TYPE_PACKAGE
		    && package->package.count) {
			int err = acpi_extract_power_resources(package, 0,
							       &ps->resources);
			if (!err)
				device->power.flags.power_resources = 1;
		}
		ACPI_FREE(buffer.pointer);
	}

	/* Evaluate "_PSx" to see if we can do explicit sets */
	pathname[2] = 'S';
	status = acpi_get_handle(device->handle, pathname, &handle);
	if (ACPI_SUCCESS(status))
		ps->flags.explicit_set = 1;

	/*
	 * State is valid if there are means to put the device into it.
	 * D3hot is only valid if _PR3 present.
	 */
	if (!list_empty(&ps->resources)
	    || (ps->flags.explicit_set && state < ACPI_STATE_D3_HOT)) {
		ps->flags.valid = 1;
		ps->flags.os_accessible = 1;
	}

	ps->power = -1;		/* Unknown - driver assigned */
	ps->latency = -1;	/* Unknown - driver assigned */
}

/*
 * Determine whether the device is power manageable (_PS0 or _PR0 present)
 * and, if so, initialize all of its D-state bookkeeping.
 */
static void acpi_bus_get_power_flags(struct acpi_device *device)
{
	acpi_status status;
	acpi_handle handle;
	u32 i;

	/* Presence of _PS0|_PR0 indicates 'power manageable' */
	status = acpi_get_handle(device->handle, "_PS0", &handle);
	if (ACPI_FAILURE(status)) {
		status = acpi_get_handle(device->handle, "_PR0", &handle);
		if (ACPI_FAILURE(status))
			return;
	}
	device->flags.power_manageable = 1;

	/*
	 * Power Management Flags
	 */
	status = acpi_get_handle(device->handle, "_PSC", &handle);
	if (ACPI_SUCCESS(status))
		device->power.flags.explicit_get = 1;
	status = acpi_get_handle(device->handle, "_IRC", &handle);
	if (ACPI_SUCCESS(status))
		device->power.flags.inrush_current = 1;

	/*
	 * Enumerate supported power management states
	 */
	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
		acpi_bus_init_power_state(device, i);

	INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);

	/* Set defaults for D0 and D3 states (always valid) */
	device->power.states[ACPI_STATE_D0].flags.valid = 1;
	device->power.states[ACPI_STATE_D0].power = 100;
	device->power.states[ACPI_STATE_D3].flags.valid = 1;
	device->power.states[ACPI_STATE_D3].power = 0;

	/* Set D3cold's explicit_set flag if _PS3 exists.
	 */
	if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
		device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;

	/* Presence of _PS3 or _PRx means we can put the device into D3 cold */
	if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set ||
			device->power.flags.power_resources)
		device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible =
			1;

	/* If the initial power state cannot be determined, give up on PM. */
	if (acpi_bus_init_power(device)) {
		acpi_free_power_resources_lists(device);
		device->flags.power_manageable = 0;
	}
}

/*
 * Set the dynamic_status/removable/ejectable flags from the presence of
 * the corresponding namespace objects (_STA, _RMV, _EJD/_EJ0).
 */
static void acpi_bus_get_flags(struct acpi_device *device)
{
	acpi_status status = AE_OK;
	acpi_handle temp = NULL;

	/* Presence of _STA indicates 'dynamic_status' */
	status = acpi_get_handle(device->handle, "_STA", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.dynamic_status = 1;

	/* Presence of _RMV indicates 'removable' */
	status = acpi_get_handle(device->handle, "_RMV", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.removable = 1;

	/* Presence of _EJD|_EJ0 indicates 'ejectable' */
	status = acpi_get_handle(device->handle, "_EJD", &temp);
	if (ACPI_SUCCESS(status))
		device->flags.ejectable = 1;
	else {
		status = acpi_get_handle(device->handle, "_EJ0", &temp);
		if (ACPI_SUCCESS(status))
			device->flags.ejectable = 1;
	}
}

/*
 * Derive the short bus_id for the device: "ACPI" for the root, fixed
 * names for the fixed-feature buttons, otherwise the 4-character object
 * name with trailing underscores stripped.
 */
static void acpi_device_get_busid(struct acpi_device *device)
{
	char bus_id[5] = { '?', 0 };
	struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
	int i = 0;

	/*
	 * Bus ID
	 * ------
	 * The device's Bus ID is simply the object name.
	 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
	 */
	if (ACPI_IS_ROOT_DEVICE(device)) {
		strcpy(device->pnp.bus_id, "ACPI");
		return;
	}

	switch (device->device_type) {
	case ACPI_BUS_TYPE_POWER_BUTTON:
		strcpy(device->pnp.bus_id, "PWRF");
		break;
	case ACPI_BUS_TYPE_SLEEP_BUTTON:
		strcpy(device->pnp.bus_id, "SLPF");
		break;
	default:
		acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
		/* Clean up trailing underscores (if any) */
		for (i = 3; i > 1; i--) {
			if (bus_id[i] == '_')
				bus_id[i] = '\0';
			else
				break;
		}
		strcpy(device->pnp.bus_id, bus_id);
		break;
	}
}

/*
 * acpi_bay_match - see if an acpi object is an ejectable driver bay
 *
 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
 * then we can safely call it an ejectable drive bay
 */
static int acpi_bay_match(acpi_handle handle)
{
	acpi_status status;
	acpi_handle tmp;
	acpi_handle phandle;

	status = acpi_get_handle(handle, "_EJ0", &tmp);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
	    (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
	    (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
	    (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
		return 0;

	/* The ATA methods may live on the parent (the IDE/SATA channel). */
	if (acpi_get_parent(handle, &phandle))
		return -ENODEV;

	if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
	    (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
	    (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
	    (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
		return 0;

	return -ENODEV;
}

/*
 * acpi_dock_match - see if an acpi object has a _DCK method
 */
static int acpi_dock_match(acpi_handle handle)
{
	acpi_handle tmp;
	/* Returns the raw acpi_status; callers test with ACPI_SUCCESS(). */
	return acpi_get_handle(handle, "_DCK", &tmp);
}

/* Return the first (primary) hardware ID, or dummy_hid if none. */
const char *acpi_device_hid(struct acpi_device *device)
{
	struct acpi_hardware_id *hid;

	if (list_empty(&device->pnp.ids))
		return dummy_hid;

	hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
	return hid->id;
}
EXPORT_SYMBOL(acpi_device_hid);

/*
 * Append a copy of @dev_id to the device's hardware-ID list.  Allocation
 * failures are silently ignored (the ID is simply not recorded).
 */
static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
{
	struct acpi_hardware_id *id;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return;

	id->id = kstrdup(dev_id, GFP_KERNEL);
	if (!id->id) {
		kfree(id);
		return;
	}

	list_add_tail(&id->list, &pnp->ids);
	pnp->type.hardware_id = 1;
}

/*
 * Old IBM workstations have a DSDT bug wherein the SMBus object
 * lacks the SMBUS01 HID and the methods do not have the necessary "_"
 * prefix.  Work around this.
 */
static int acpi_ibm_smbus_match(acpi_handle handle)
{
	acpi_handle h_dummy;
	struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
	int result;

	if (!dmi_name_in_vendors("IBM"))
		return -ENODEV;

	/* Look for SMBS object */
	result = acpi_get_name(handle, ACPI_SINGLE_NAME, &path);
	if (result)
		return result;

	if (strcmp("SMBS", path.pointer)) {
		result = -ENODEV;
		goto out;
	}

	/* Does it have the necessary (but misnamed) methods? */
	result = -ENODEV;
	if (ACPI_SUCCESS(acpi_get_handle(handle, "SBI", &h_dummy)) &&
	    ACPI_SUCCESS(acpi_get_handle(handle, "SBR", &h_dummy)) &&
	    ACPI_SUCCESS(acpi_get_handle(handle, "SBW", &h_dummy)))
		result = 0;
out:
	kfree(path.pointer);
	return result;
}

/*
 * Populate @pnp with the device's identification: _HID/_CID/_ADR/_UID
 * from acpi_get_object_info() for real namespace devices, synthetic IDs
 * for video/bay/dock/IBM-SMBus quirks, and fixed IDs for the pseudo
 * device types (power resource, processor, thermal zone, buttons).
 */
static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
				int device_type)
{
	acpi_status status;
	struct acpi_device_info *info;
	struct acpi_pnp_device_id_list *cid_list;
	int i;

	switch (device_type) {
	case ACPI_BUS_TYPE_DEVICE:
		if (handle == ACPI_ROOT_OBJECT) {
			acpi_add_id(pnp, ACPI_SYSTEM_HID);
			break;
		}

		status = acpi_get_object_info(handle, &info);
		if (ACPI_FAILURE(status)) {
			pr_err(PREFIX "%s: Error reading device info\n",
			       __func__);
			return;
		}

		if (info->valid & ACPI_VALID_HID)
			acpi_add_id(pnp, info->hardware_id.string);
		if (info->valid & ACPI_VALID_CID) {
			cid_list = &info->compatible_id_list;
			for (i = 0; i < cid_list->count; i++)
				acpi_add_id(pnp, cid_list->ids[i].string);
		}
		if (info->valid & ACPI_VALID_ADR) {
			pnp->bus_address = info->address;
			pnp->type.bus_address = 1;
		}
		if (info->valid & ACPI_VALID_UID)
			pnp->unique_id = kstrdup(info->unique_id.string,
							GFP_KERNEL);

		kfree(info);

		/*
		 * Some devices don't reliably have _HIDs & _CIDs, so add
		 * synthetic HIDs to make sure drivers can find them.
		 */
		if (acpi_is_video_device(handle))
			acpi_add_id(pnp, ACPI_VIDEO_HID);
		else if (ACPI_SUCCESS(acpi_bay_match(handle)))
			acpi_add_id(pnp, ACPI_BAY_HID);
		else if (ACPI_SUCCESS(acpi_dock_match(handle)))
			acpi_add_id(pnp, ACPI_DOCK_HID);
		else if (!acpi_ibm_smbus_match(handle))
			acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
		else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
			acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
			strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
			strcpy(pnp->device_class, ACPI_BUS_CLASS);
		}

		break;
	case ACPI_BUS_TYPE_POWER:
		acpi_add_id(pnp, ACPI_POWER_HID);
		break;
	case ACPI_BUS_TYPE_PROCESSOR:
		acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
		break;
	case ACPI_BUS_TYPE_THERMAL:
		acpi_add_id(pnp, ACPI_THERMAL_HID);
		break;
	case ACPI_BUS_TYPE_POWER_BUTTON:
		acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
		break;
	case ACPI_BUS_TYPE_SLEEP_BUTTON:
		acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
		break;
	}
}

/* Free everything acpi_set_pnp_ids() allocated. */
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
{
	struct acpi_hardware_id *id, *tmp;

	list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
		kfree(id->id);
		kfree(id);
	}
	kfree(pnp->unique_id);
}

/*
 * First stage of device-node creation: fill in type, handle, parent,
 * bus_id, PNP IDs and flags, and initialize the embedded struct device.
 * Uevents are suppressed until acpi_device_add_finalize().
 */
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
			     int type, unsigned long long sta)
{
	INIT_LIST_HEAD(&device->pnp.ids);
	device->device_type = type;
	device->handle = handle;
	device->parent = acpi_bus_get_parent(handle);
	STRUCT_TO_INT(device->status) = sta;
	acpi_device_get_busid(device);
	acpi_set_pnp_ids(handle, &device->pnp, type);
	acpi_bus_get_flags(device);
	/* Driver matching is deferred until the node is fully set up. */
	device->flags.match_driver = false;
	device_initialize(&device->dev);
	dev_set_uevent_suppress(&device->dev, true);
}

/* Last stage: allow driver matching and announce the device to userspace. */
void acpi_device_add_finalize(struct acpi_device *device)
{
	device->flags.match_driver = true;
	dev_set_uevent_suppress(&device->dev, false);
	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
}

/*
 * Allocate, initialize and register one struct acpi_device for @handle,
 * returning it via @child.  Returns 0 or a negative errno.
 */
static int acpi_add_single_object(struct acpi_device **child,
				  acpi_handle handle, int type,
				  unsigned long
				  long sta)
{
	int result;
	struct acpi_device *device;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
	if (!device) {
		printk(KERN_ERR PREFIX "Memory allocation error\n");
		return -ENOMEM;
	}

	acpi_init_device_object(device, handle, type, sta);
	acpi_bus_get_power_flags(device);
	acpi_bus_get_wakeup_device_flags(device);

	result = acpi_device_add(device, acpi_device_release);
	if (result) {
		/* Not yet registered, so release directly instead of put. */
		acpi_device_release(&device->dev);
		return result;
	}

	acpi_power_add_remove_device(device, true);
	acpi_device_add_finalize(device);
	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
		dev_name(&device->dev), (char *) buffer.pointer,
		device->parent ? dev_name(&device->parent->dev) : "(null)"));
	kfree(buffer.pointer);
	*child = device;
	return 0;
}

/*
 * Map an ACPICA object type to an ACPI_BUS_TYPE_* and obtain the device
 * status (_STA for devices/processors, the default for thermal zones and
 * power resources).  Returns 0 or -ENODEV for unsupported objects.
 */
static int acpi_bus_type_and_status(acpi_handle handle, int *type,
				    unsigned long long *sta)
{
	acpi_status status;
	acpi_object_type acpi_type;

	status = acpi_get_type(handle, &acpi_type);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	switch (acpi_type) {
	case ACPI_TYPE_ANY:		/* for ACPI_ROOT_OBJECT */
	case ACPI_TYPE_DEVICE:
		*type = ACPI_BUS_TYPE_DEVICE;
		status = acpi_bus_get_status_handle(handle, sta);
		if (ACPI_FAILURE(status))
			return -ENODEV;
		break;
	case ACPI_TYPE_PROCESSOR:
		*type = ACPI_BUS_TYPE_PROCESSOR;
		status = acpi_bus_get_status_handle(handle, sta);
		if (ACPI_FAILURE(status))
			return -ENODEV;
		break;
	case ACPI_TYPE_THERMAL:
		*type = ACPI_BUS_TYPE_THERMAL;
		*sta = ACPI_STA_DEFAULT;
		break;
	case ACPI_TYPE_POWER:
		*type = ACPI_BUS_TYPE_POWER;
		*sta = ACPI_STA_DEFAULT;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

/*
 * Test whether @idstr appears in @handler's ID table; optionally return
 * the matching table entry through @matchid.
 */
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
				       char *idstr,
				       const struct acpi_device_id **matchid)
{
	const struct acpi_device_id *devid;

	for (devid = handler->ids; devid->id[0]; devid++)
		if (!strcmp((char *)devid->id, idstr)) {
			if (matchid)
				*matchid = devid;

			return true;
		}

	return false;
}

/* Find the first registered scan handler whose ID table contains @idstr. */
static
struct acpi_scan_handler *acpi_scan_match_handler(char *idstr,
					const struct acpi_device_id **matchid)
{
	struct acpi_scan_handler *handler;

	list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
		if (acpi_scan_handler_matching(handler, idstr, matchid))
			return handler;

	return NULL;
}

/* Toggle a hotplug profile's enabled flag under the scan lock. */
void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
{
	if (!!hotplug->enabled == !!val)
		return;

	mutex_lock(&acpi_scan_lock);

	hotplug->enabled = val;

	mutex_unlock(&acpi_scan_lock);
}

/*
 * If any of the object's IDs is claimed by a scan handler that accepts
 * hotplug, install the hotplug notify callback on the handle.  Uses a
 * temporary pnp ID list since no struct acpi_device may exist yet.
 */
static void acpi_scan_init_hotplug(acpi_handle handle, int type)
{
	struct acpi_device_pnp pnp = {};
	struct acpi_hardware_id *hwid;
	struct acpi_scan_handler *handler;

	INIT_LIST_HEAD(&pnp.ids);
	acpi_set_pnp_ids(handle, &pnp, type);

	if (!pnp.type.hardware_id)
		goto out;

	/*
	 * This relies on the fact that acpi_install_notify_handler() will not
	 * install the same notify handler routine twice for the same handle.
	 */
	list_for_each_entry(hwid, &pnp.ids, list) {
		handler = acpi_scan_match_handler(hwid->id, NULL);
		if (handler && !handler->hotplug.ignore) {
			acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					acpi_hotplug_notify_cb, handler);
			break;
		}
	}

out:
	acpi_free_pnp_ids(&pnp);
}

/*
 * Namespace-walk callback (first pass): create a struct acpi_device for
 * each suitable object.  Returns AE_CTRL_DEPTH to prune the subtree of
 * absent/non-functioning devices.
 */
static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
				      void *not_used, void **return_value)
{
	struct acpi_device *device = NULL;
	int type;
	unsigned long long sta;
	acpi_status status;
	int result;

	acpi_bus_get_device(handle, &device);
	if (device)
		goto out;

	result = acpi_bus_type_and_status(handle, &type, &sta);
	if (result)
		return AE_OK;

	if (type == ACPI_BUS_TYPE_POWER) {
		acpi_add_power_resource(handle);
		return AE_OK;
	}

	acpi_scan_init_hotplug(handle, type);

	if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
	    !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
		struct acpi_device_wakeup wakeup;
		acpi_handle temp;

		/*
		 * Even for a pruned device, _PRW must be parsed so that its
		 * wakeup GPE gets configured; only the resource list is kept.
		 */
		status = acpi_get_handle(handle, "_PRW", &temp);
		if (ACPI_SUCCESS(status)) {
			acpi_bus_extract_wakeup_device_power_package(handle,
								     &wakeup);
			acpi_power_resources_list_free(&wakeup.resources);
		}
		return AE_CTRL_DEPTH;
	}

	acpi_add_single_object(&device, handle, type, sta);
	if (!device)
		return AE_CTRL_DEPTH;

 out:
	if (!*return_value)
		*return_value = device;

	return AE_OK;
}

/*
 * Offer the device to each scan handler matching one of its IDs.
 * Returns >0 when a handler attached, <0 on handler error, 0 otherwise.
 */
static int acpi_scan_attach_handler(struct acpi_device *device)
{
	struct acpi_hardware_id *hwid;
	int ret = 0;

	list_for_each_entry(hwid, &device->pnp.ids, list) {
		const struct acpi_device_id *devid;
		struct acpi_scan_handler *handler;

		handler = acpi_scan_match_handler(hwid->id, &devid);
		if (handler) {
			ret = handler->attach(device, devid);
			if (ret > 0) {
				device->handler = handler;
				break;
			} else if (ret < 0) {
				break;
			}
		}
	}
	return ret;
}

/*
 * Namespace-walk callback (second pass): attach a scan handler or an
 * ACPI driver to each device node created by the first pass.
 */
static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
					  void *not_used, void **ret_not_used)
{
	struct acpi_device *device;
	unsigned long long sta_not_used;
	int ret;

	/*
	 * Ignore errors ignored by acpi_bus_check_add() to avoid terminating
	 * namespace walks prematurely.
	 */
	if (acpi_bus_type_and_status(handle, &ret, &sta_not_used))
		return AE_OK;

	if (acpi_bus_get_device(handle, &device))
		return AE_CTRL_DEPTH;

	if (device->handler)
		return AE_OK;

	ret = acpi_scan_attach_handler(device);
	if (ret)
		return ret > 0 ? AE_OK : AE_CTRL_DEPTH;

	ret = device_attach(&device->dev);
	return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
}

/**
 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
 * @handle: Root of the namespace scope to scan.
 *
 * Scan a given ACPI tree (probably recently hot-plugged) and create and add
 * found devices.
 *
 * If no devices were found, -ENODEV is returned, but it does not mean that
 * there has been a real error.  There just have been no suitable ACPI objects
 * in the table trunk from which the kernel could create a device and add an
 * appropriate driver.
 *
 * Must be called under acpi_scan_lock.
*/ int acpi_bus_scan(acpi_handle handle) { void *device = NULL; int error = 0; if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device))) acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, acpi_bus_check_add, NULL, NULL, &device); if (!device) error = -ENODEV; else if (ACPI_SUCCESS(acpi_bus_device_attach(handle, 0, NULL, NULL))) acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, acpi_bus_device_attach, NULL, NULL, NULL); return error; } EXPORT_SYMBOL(acpi_bus_scan); static acpi_status acpi_bus_device_detach(acpi_handle handle, u32 lvl_not_used, void *not_used, void **ret_not_used) { struct acpi_device *device = NULL; if (!acpi_bus_get_device(handle, &device)) { struct acpi_scan_handler *dev_handler = device->handler; device->removal_type = ACPI_BUS_REMOVAL_EJECT; if (dev_handler) { if (dev_handler->detach) dev_handler->detach(device); device->handler = NULL; } else { device_release_driver(&device->dev); } } return AE_OK; } static acpi_status acpi_bus_remove(acpi_handle handle, u32 lvl_not_used, void *not_used, void **ret_not_used) { struct acpi_device *device = NULL; if (!acpi_bus_get_device(handle, &device)) acpi_device_unregister(device); return AE_OK; } /** * acpi_bus_trim - Remove ACPI device node and all of its descendants * @start: Root of the ACPI device nodes subtree to remove. * * Must be called under acpi_scan_lock. */ void acpi_bus_trim(struct acpi_device *start) { /* * Execute acpi_bus_device_detach() as a post-order callback to detach * all ACPI drivers from the device nodes being removed. */ acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL, acpi_bus_device_detach, NULL, NULL); acpi_bus_device_detach(start->handle, 0, NULL, NULL); /* * Execute acpi_bus_remove() as a post-order callback to remove device * nodes in the given namespace scope. 
*/ acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL, acpi_bus_remove, NULL, NULL); acpi_bus_remove(start->handle, 0, NULL, NULL); } EXPORT_SYMBOL_GPL(acpi_bus_trim); static int acpi_bus_scan_fixed(void) { int result = 0; /* * Enumerate all fixed-feature devices. */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) { struct acpi_device *device = NULL; result = acpi_add_single_object(&device, NULL, ACPI_BUS_TYPE_POWER_BUTTON, ACPI_STA_DEFAULT); if (result) return result; result = device_attach(&device->dev); if (result < 0) return result; device_init_wakeup(&device->dev, true); } if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) { struct acpi_device *device = NULL; result = acpi_add_single_object(&device, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON, ACPI_STA_DEFAULT); if (result) return result; result = device_attach(&device->dev); } return result < 0 ? result : 0; } int __init acpi_scan_init(void) { int result; result = bus_register(&acpi_bus_type); if (result) { /* We don't want to quit even if we failed to add suspend/resume */ printk(KERN_ERR PREFIX "Could not register bus type\n"); } acpi_pci_root_init(); acpi_pci_link_init(); acpi_platform_init(); acpi_lpss_init(); acpi_cmos_rtc_init(); acpi_container_init(); acpi_memory_hotplug_init(); acpi_dock_init(); mutex_lock(&acpi_scan_lock); /* * Enumerate devices in the ACPI namespace. */ result = acpi_bus_scan(ACPI_ROOT_OBJECT); if (result) goto out; result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root); if (result) goto out; result = acpi_bus_scan_fixed(); if (result) { acpi_device_unregister(acpi_root); goto out; } acpi_update_all_gpes(); acpi_pci_root_hp_init(); out: mutex_unlock(&acpi_scan_lock); return result; }
gpl-2.0
Arc-Team/android_kernel_htc_holiday
net/mac80211/work.c
828
33058
/* * mac80211 work implementation * * Copyright 2003-2008, Jouni Malinen <j@w1.fi> * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2009, Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/slab.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "ieee80211_i.h" #include "rate.h" #define IEEE80211_AUTH_TIMEOUT (HZ / 5) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) #define IEEE80211_ASSOC_MAX_TRIES 3 enum work_action { WORK_ACT_MISMATCH, WORK_ACT_NONE, WORK_ACT_TIMEOUT, WORK_ACT_DONE, }; /* utils */ static inline void ASSERT_WORK_MTX(struct ieee80211_local *local) { lockdep_assert_held(&local->mtx); } /* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was * asked for before, or if it's not pending right now. This * function ensures that. Note that it then is required to * run this function for all timeouts after the first one * has happened -- the work that runs from this timer will * do that. 
*/ static void run_again(struct ieee80211_local *local, unsigned long timeout) { ASSERT_WORK_MTX(local); if (!timer_pending(&local->work_timer) || time_before(timeout, local->work_timer.expires)) mod_timer(&local->work_timer, timeout); } void free_work(struct ieee80211_work *wk) { kfree_rcu(wk, rcu_head); } static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, struct ieee80211_supported_band *sband, u32 *rates) { int i, j, count; *rates = 0; count = 0; for (i = 0; i < supp_rates_len; i++) { int rate = (supp_rates[i] & 0x7F) * 5; for (j = 0; j < sband->n_bitrates; j++) if (sband->bitrates[j].bitrate == rate) { *rates |= BIT(j); count++; break; } } return count; } /* frame sending functions */ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, enum ieee80211_smps_mode smps) { struct ieee80211_ht_info *ht_info; u8 *pos; u32 flags = channel->flags; u16 cap = sband->ht_cap.cap; __le16 tmp; if (!sband->ht_cap.ht_supported) return; if (!ht_info_ie) return; if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) return; ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); /* determine capability flags */ switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: if (flags & IEEE80211_CHAN_NO_HT40PLUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: if (flags & IEEE80211_CHAN_NO_HT40MINUS) { cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; cap &= ~IEEE80211_HT_CAP_SGI_40; } break; } /* set SM PS mode properly */ cap &= ~IEEE80211_HT_CAP_SM_PS; switch (smps) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); case IEEE80211_SMPS_OFF: cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; break; case IEEE80211_SMPS_STATIC: cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; break; case 
IEEE80211_SMPS_DYNAMIC: cap |= WLAN_HT_CAP_SM_PS_DYNAMIC << IEEE80211_HT_CAP_SM_PS_SHIFT; break; } /* reserve and fill IE */ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); *pos++ = WLAN_EID_HT_CAPABILITY; *pos++ = sizeof(struct ieee80211_ht_cap); memset(pos, 0, sizeof(struct ieee80211_ht_cap)); /* capability flags */ tmp = cpu_to_le16(cap); memcpy(pos, &tmp, sizeof(u16)); pos += sizeof(u16); /* AMPDU parameters */ *pos++ = sband->ht_cap.ampdu_factor | (sband->ht_cap.ampdu_density << IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); /* MCS set */ memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); pos += sizeof(sband->ht_cap.mcs); /* extended capabilities */ pos += sizeof(__le16); /* BF capabilities */ pos += sizeof(__le32); /* antenna selection */ pos += sizeof(u8); } static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_work *wk) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u8 *pos, qos_info; size_t offset = 0, noffset; int i, count, rates_len, supp_rates_len; u16 capab; struct ieee80211_supported_band *sband; u32 rates = 0; sband = local->hw.wiphy->bands[wk->chan->band]; if (wk->assoc.supp_rates_len) { /* * Get all rates supported by the device and the AP as * some APs don't like getting a superset of their rates * in the association request (e.g. D-Link DAP 1353 in * b-only mode)... */ rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates, wk->assoc.supp_rates_len, sband, &rates); } else { /* * In case AP not provide any supported rates information * before association, we send information element(s) with * all rates that we support. 
*/ rates = ~0; rates_len = sband->n_bitrates; } skb = alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + /* bit too much but doesn't matter */ 2 + wk->assoc.ssid_len + /* SSID */ 4 + rates_len + /* (extended) rates */ 4 + /* power capability */ 2 + 2 * sband->n_channels + /* supported channels */ 2 + sizeof(struct ieee80211_ht_cap) + /* HT */ wk->ie_len + /* extra IEs */ 9, /* WMM */ GFP_KERNEL); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " "frame\n", sdata->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); capab = WLAN_CAPABILITY_ESS; if (sband->band == IEEE80211_BAND_2GHZ) { if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)) capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME; if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)) capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; } if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY) capab |= WLAN_CAPABILITY_PRIVACY; if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) && (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)) capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, wk->filter_ta, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN); if (!is_zero_ether_addr(wk->assoc.prev_bssid)) { skb_put(skb, 10); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); mgmt->u.reassoc_req.listen_interval = cpu_to_le16(local->hw.conf.listen_interval); memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid, ETH_ALEN); } else { skb_put(skb, 4); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); mgmt->u.assoc_req.listen_interval = cpu_to_le16(local->hw.conf.listen_interval); } /* SSID */ pos = skb_put(skb, 2 + wk->assoc.ssid_len); *pos++ = WLAN_EID_SSID; *pos++ = 
wk->assoc.ssid_len; memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len); /* add all rates which were marked to be used above */ supp_rates_len = rates_len; if (supp_rates_len > 8) supp_rates_len = 8; pos = skb_put(skb, supp_rates_len + 2); *pos++ = WLAN_EID_SUPP_RATES; *pos++ = supp_rates_len; count = 0; for (i = 0; i < sband->n_bitrates; i++) { if (BIT(i) & rates) { int rate = sband->bitrates[i].bitrate; *pos++ = (u8) (rate / 5); if (++count == 8) break; } } if (rates_len > count) { pos = skb_put(skb, rates_len - count + 2); *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = rates_len - count; for (i++; i < sband->n_bitrates; i++) { if (BIT(i) & rates) { int rate = sband->bitrates[i].bitrate; *pos++ = (u8) (rate / 5); } } } if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) { /* 1. power capabilities */ pos = skb_put(skb, 4); *pos++ = WLAN_EID_PWR_CAPABILITY; *pos++ = 2; *pos++ = 0; /* min tx power */ *pos++ = wk->chan->max_power; /* max tx power */ /* 2. supported channels */ /* TODO: get this in reg domain format */ pos = skb_put(skb, 2 * sband->n_channels + 2); *pos++ = WLAN_EID_SUPPORTED_CHANNELS; *pos++ = 2 * sband->n_channels; for (i = 0; i < sband->n_channels; i++) { *pos++ = ieee80211_frequency_to_channel( sband->channels[i].center_freq); *pos++ = 1; /* one channel in the subband*/ } } /* if present, add any custom IEs that go before HT */ if (wk->ie_len && wk->ie) { static const u8 before_ht[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, WLAN_EID_EXT_SUPP_RATES, WLAN_EID_PWR_CAPABILITY, WLAN_EID_SUPPORTED_CHANNELS, WLAN_EID_RSN, WLAN_EID_QOS_CAPA, WLAN_EID_RRM_ENABLED_CAPABILITIES, WLAN_EID_MOBILITY_DOMAIN, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, }; noffset = ieee80211_ie_split(wk->ie, wk->ie_len, before_ht, ARRAY_SIZE(before_ht), offset); pos = skb_put(skb, noffset - offset); memcpy(pos, wk->ie + offset, noffset - offset); offset = noffset; } if (wk->assoc.use_11n && wk->assoc.wmm_used && local->hw.queues >= 4) ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie, sband, 
wk->chan, wk->assoc.smps); /* if present, add any custom non-vendor IEs that go after HT */ if (wk->ie_len && wk->ie) { noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len, offset); pos = skb_put(skb, noffset - offset); memcpy(pos, wk->ie + offset, noffset - offset); offset = noffset; } if (wk->assoc.wmm_used && local->hw.queues >= 4) { if (wk->assoc.uapsd_used) { qos_info = local->uapsd_queues; qos_info |= (local->uapsd_max_sp_len << IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT); } else { qos_info = 0; } pos = skb_put(skb, 9); *pos++ = WLAN_EID_VENDOR_SPECIFIC; *pos++ = 7; /* len */ *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */ *pos++ = 0x50; *pos++ = 0xf2; *pos++ = 2; /* WME */ *pos++ = 0; /* WME info */ *pos++ = 1; /* WME ver */ *pos++ = qos_info; } /* add any remaining custom (i.e. vendor specific here) IEs */ if (wk->ie_len && wk->ie) { noffset = wk->ie_len; pos = skb_put(skb, noffset - offset); memcpy(pos, wk->ie + offset, noffset - offset); } IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; ieee80211_tx_skb(sdata, skb); } static void ieee80211_remove_auth_bss(struct ieee80211_local *local, struct ieee80211_work *wk) { struct cfg80211_bss *cbss; u16 capa_val = WLAN_CAPABILITY_ESS; if (wk->probe_auth.privacy) capa_val |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta, wk->probe_auth.ssid, wk->probe_auth.ssid_len, WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, capa_val); if (!cbss) return; cfg80211_unlink_bss(local->hw.wiphy, cbss); cfg80211_put_bss(cbss); } static enum work_action __must_check ieee80211_direct_probe(struct ieee80211_work *wk) { struct ieee80211_sub_if_data *sdata = wk->sdata; struct ieee80211_local *local = sdata->local; wk->probe_auth.tries++; if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) { printk(KERN_DEBUG "%s: direct probe to %pM timed out\n", sdata->name, wk->filter_ta); /* * Most likely AP is not in the range so remove the * bss struct for that AP. 
*/ ieee80211_remove_auth_bss(local, wk); return WORK_ACT_TIMEOUT; } printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n", sdata->name, wk->filter_ta, wk->probe_auth.tries, IEEE80211_AUTH_MAX_TRIES); /* * Direct probe is sent to broadcast address as some APs * will not answer to direct packet in unassociated state. */ ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid, wk->probe_auth.ssid_len, NULL, 0); wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; run_again(local, wk->timeout); return WORK_ACT_NONE; } static enum work_action __must_check ieee80211_authenticate(struct ieee80211_work *wk) { struct ieee80211_sub_if_data *sdata = wk->sdata; struct ieee80211_local *local = sdata->local; wk->probe_auth.tries++; if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) { printk(KERN_DEBUG "%s: authentication with %pM" " timed out\n", sdata->name, wk->filter_ta); /* * Most likely AP is not in the range so remove the * bss struct for that AP. */ ieee80211_remove_auth_bss(local, wk); return WORK_ACT_TIMEOUT; } printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n", sdata->name, wk->filter_ta, wk->probe_auth.tries); ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie, wk->ie_len, wk->filter_ta, NULL, 0, 0); wk->probe_auth.transaction = 2; wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; run_again(local, wk->timeout); return WORK_ACT_NONE; } static enum work_action __must_check ieee80211_associate(struct ieee80211_work *wk) { struct ieee80211_sub_if_data *sdata = wk->sdata; struct ieee80211_local *local = sdata->local; wk->assoc.tries++; if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) { printk(KERN_DEBUG "%s: association with %pM" " timed out\n", sdata->name, wk->filter_ta); /* * Most likely AP is not in the range so remove the * bss struct for that AP. 
*/ if (wk->assoc.bss) cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss); return WORK_ACT_TIMEOUT; } printk(KERN_DEBUG "%s: associate with %pM (try %d)\n", sdata->name, wk->filter_ta, wk->assoc.tries); ieee80211_send_assoc(sdata, wk); wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; run_again(local, wk->timeout); return WORK_ACT_NONE; } static enum work_action __must_check ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk) { /* * First time we run, do nothing -- the generic code will * have switched to the right channel etc. */ if (!wk->started) { wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration); cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk, wk->chan, wk->chan_type, wk->remain.duration, GFP_KERNEL); return WORK_ACT_NONE; } return WORK_ACT_TIMEOUT; } static enum work_action __must_check ieee80211_offchannel_tx(struct ieee80211_work *wk) { if (!wk->started) { wk->timeout = jiffies + msecs_to_jiffies(wk->offchan_tx.wait); /* * After this, offchan_tx.frame remains but now is no * longer a valid pointer -- we still need it as the * cookie for canceling this work/status matching. */ ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame); return WORK_ACT_NONE; } return WORK_ACT_TIMEOUT; } static enum work_action __must_check ieee80211_assoc_beacon_wait(struct ieee80211_work *wk) { if (wk->started) return WORK_ACT_TIMEOUT; /* * Wait up to one beacon interval ... * should this be more if we miss one? 
*/ printk(KERN_DEBUG "%s: waiting for beacon from %pM\n", wk->sdata->name, wk->filter_ta); wk->timeout = TU_TO_EXP_TIME(wk->assoc.bss->beacon_interval); return WORK_ACT_NONE; } static void ieee80211_auth_challenge(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_sub_if_data *sdata = wk->sdata; u8 *pos; struct ieee802_11_elems elems; pos = mgmt->u.auth.variable; ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); if (!elems.challenge) return; ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm, elems.challenge - 2, elems.challenge_len + 2, wk->filter_ta, wk->probe_auth.key, wk->probe_auth.key_len, wk->probe_auth.key_idx); wk->probe_auth.transaction = 4; } static enum work_action __must_check ieee80211_rx_mgmt_auth(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len) { u16 auth_alg, auth_transaction, status_code; if (wk->type != IEEE80211_WORK_AUTH) return WORK_ACT_MISMATCH; if (len < 24 + 6) return WORK_ACT_NONE; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); status_code = le16_to_cpu(mgmt->u.auth.status_code); if (auth_alg != wk->probe_auth.algorithm || auth_transaction != wk->probe_auth.transaction) return WORK_ACT_NONE; if (status_code != WLAN_STATUS_SUCCESS) { printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n", wk->sdata->name, mgmt->sa, status_code); return WORK_ACT_DONE; } switch (wk->probe_auth.algorithm) { case WLAN_AUTH_OPEN: case WLAN_AUTH_LEAP: case WLAN_AUTH_FT: break; case WLAN_AUTH_SHARED_KEY: if (wk->probe_auth.transaction != 4) { ieee80211_auth_challenge(wk, mgmt, len); /* need another frame */ return WORK_ACT_NONE; } break; default: WARN_ON(1); return WORK_ACT_NONE; } printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name); return WORK_ACT_DONE; } static enum work_action __must_check ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len, bool reassoc) { struct 
ieee80211_sub_if_data *sdata = wk->sdata; struct ieee80211_local *local = sdata->local; u16 capab_info, status_code, aid; struct ieee802_11_elems elems; u8 *pos; if (wk->type != IEEE80211_WORK_ASSOC) return WORK_ACT_MISMATCH; /* * AssocResp and ReassocResp have identical structure, so process both * of them in this function. */ if (len < 24 + 6) return WORK_ACT_NONE; capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); aid = le16_to_cpu(mgmt->u.assoc_resp.aid); printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x " "status=%d aid=%d)\n", sdata->name, reassoc ? "Rea" : "A", mgmt->sa, capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); pos = mgmt->u.assoc_resp.variable; ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems); if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY && elems.timeout_int && elems.timeout_int_len == 5 && elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) { u32 tu, ms; tu = get_unaligned_le32(elems.timeout_int + 1); ms = tu * 1024 / 1000; printk(KERN_DEBUG "%s: %pM rejected association temporarily; " "comeback duration %u TU (%u ms)\n", sdata->name, mgmt->sa, tu, ms); wk->timeout = jiffies + msecs_to_jiffies(ms); if (ms > IEEE80211_ASSOC_TIMEOUT) run_again(local, wk->timeout); return WORK_ACT_NONE; } if (status_code != WLAN_STATUS_SUCCESS) printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n", sdata->name, mgmt->sa, status_code); else printk(KERN_DEBUG "%s: associated\n", sdata->name); return WORK_ACT_DONE; } static enum work_action __must_check ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_sub_if_data *sdata = wk->sdata; struct ieee80211_local *local = sdata->local; size_t baselen; ASSERT_WORK_MTX(local); if (wk->type != IEEE80211_WORK_DIRECT_PROBE) return WORK_ACT_MISMATCH; if (len < 24 + 12) return WORK_ACT_NONE; baselen = (u8 *) 
mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return WORK_ACT_NONE; printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name); return WORK_ACT_DONE; } static enum work_action __must_check ieee80211_rx_mgmt_beacon(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_sub_if_data *sdata = wk->sdata; struct ieee80211_local *local = sdata->local; ASSERT_WORK_MTX(local); if (wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) return WORK_ACT_MISMATCH; if (len < 24 + 12) return WORK_ACT_NONE; printk(KERN_DEBUG "%s: beacon received\n", sdata->name); return WORK_ACT_DONE; } static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; struct ieee80211_work *wk; enum work_action rma = WORK_ACT_NONE; u16 fc; rx_status = (struct ieee80211_rx_status *) skb->cb; mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); mutex_lock(&local->mtx); list_for_each_entry(wk, &local->work_list, list) { const u8 *bssid = NULL; switch (wk->type) { case IEEE80211_WORK_DIRECT_PROBE: case IEEE80211_WORK_AUTH: case IEEE80211_WORK_ASSOC: case IEEE80211_WORK_ASSOC_BEACON_WAIT: bssid = wk->filter_ta; break; default: continue; } /* * Before queuing, we already verified mgmt->sa, * so this is needed just for matching. 
*/ if (compare_ether_addr(bssid, mgmt->bssid)) continue; switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: rma = ieee80211_rx_mgmt_beacon(wk, mgmt, skb->len); break; case IEEE80211_STYPE_PROBE_RESP: rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_AUTH: rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len); break; case IEEE80211_STYPE_ASSOC_RESP: rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt, skb->len, false); break; case IEEE80211_STYPE_REASSOC_RESP: rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt, skb->len, true); break; default: WARN_ON(1); rma = WORK_ACT_NONE; } /* * We've either received an unexpected frame, or we have * multiple work items and need to match the frame to the * right one. */ if (rma == WORK_ACT_MISMATCH) continue; /* * We've processed this frame for that work, so it can't * belong to another work struct. * NB: this is also required for correctness for 'rma'! */ break; } switch (rma) { case WORK_ACT_MISMATCH: /* ignore this unmatched frame */ break; case WORK_ACT_NONE: break; case WORK_ACT_DONE: list_del_rcu(&wk->list); break; default: WARN(1, "unexpected: %d", rma); } mutex_unlock(&local->mtx); if (rma != WORK_ACT_DONE) goto out; switch (wk->done(wk, skb)) { case WORK_DONE_DESTROY: free_work(wk); break; case WORK_DONE_REQUEUE: synchronize_rcu(); wk->started = false; /* restart */ mutex_lock(&local->mtx); list_add_tail(&wk->list, &local->work_list); mutex_unlock(&local->mtx); } out: kfree_skb(skb); } static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct, enum nl80211_channel_type oper_ct) { switch (wk_ct) { case NL80211_CHAN_NO_HT: return true; case NL80211_CHAN_HT20: if (oper_ct != NL80211_CHAN_NO_HT) return true; return false; case NL80211_CHAN_HT40MINUS: case NL80211_CHAN_HT40PLUS: return (wk_ct == oper_ct); } WARN_ON(1); /* shouldn't get here */ return false; } static enum nl80211_channel_type ieee80211_calc_ct(enum nl80211_channel_type wk_ct, enum nl80211_channel_type 
oper_ct) { switch (wk_ct) { case NL80211_CHAN_NO_HT: return oper_ct; case NL80211_CHAN_HT20: if (oper_ct != NL80211_CHAN_NO_HT) return oper_ct; return wk_ct; case NL80211_CHAN_HT40MINUS: case NL80211_CHAN_HT40PLUS: return wk_ct; } WARN_ON(1); /* shouldn't get here */ return wk_ct; } static void ieee80211_work_timer(unsigned long data) { struct ieee80211_local *local = (void *) data; if (local->quiescing) return; ieee80211_queue_work(&local->hw, &local->work_work); } static void ieee80211_work_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, work_work); struct sk_buff *skb; struct ieee80211_work *wk, *tmp; LIST_HEAD(free_work); enum work_action rma; bool remain_off_channel = false; if (local->scanning) return; /* * ieee80211_queue_work() should have picked up most cases, * here we'll pick the rest. */ if (WARN(local->suspended, "work scheduled while going to suspend\n")) return; /* first process frames to avoid timing out while a frame is pending */ while ((skb = skb_dequeue(&local->work_skb_queue))) ieee80211_work_rx_queued_mgmt(local, skb); mutex_lock(&local->mtx); ieee80211_recalc_idle(local); list_for_each_entry_safe(wk, tmp, &local->work_list, list) { bool started = wk->started; /* mark work as started if it's on the current off-channel */ if (!started && local->tmp_channel && wk->chan == local->tmp_channel && wk->chan_type == local->tmp_channel_type) { started = true; wk->timeout = jiffies; } if (!started && !local->tmp_channel) { bool on_oper_chan; bool tmp_chan_changed = false; bool on_oper_chan2; enum nl80211_channel_type wk_ct; on_oper_chan = ieee80211_cfg_on_oper_channel(local); /* Work with existing channel type if possible. 
*/ wk_ct = wk->chan_type; if (wk->chan == local->hw.conf.channel) wk_ct = ieee80211_calc_ct(wk->chan_type, local->hw.conf.channel_type); if (local->tmp_channel) if ((local->tmp_channel != wk->chan) || (local->tmp_channel_type != wk_ct)) tmp_chan_changed = true; local->tmp_channel = wk->chan; local->tmp_channel_type = wk_ct; /* * Leave the station vifs in awake mode if they * happen to be on the same channel as * the requested channel. */ on_oper_chan2 = ieee80211_cfg_on_oper_channel(local); if (on_oper_chan != on_oper_chan2) { if (on_oper_chan2) { /* going off oper channel, PS too */ ieee80211_offchannel_stop_vifs(local); ieee80211_hw_config(local, 0); } else { /* going on channel, but leave PS * off-channel. */ ieee80211_hw_config(local, 0); ieee80211_offchannel_return(local, true); } } else if (tmp_chan_changed) /* Still off-channel, but on some other * channel, so update hardware. * PS should already be off-channel. */ ieee80211_hw_config(local, 0); started = true; wk->timeout = jiffies; } /* don't try to work with items that aren't started */ if (!started) continue; if (time_is_after_jiffies(wk->timeout)) { /* * This work item isn't supposed to be worked on * right now, but take care to adjust the timer * properly. 
*/ run_again(local, wk->timeout); continue; } switch (wk->type) { default: WARN_ON(1); /* nothing */ rma = WORK_ACT_NONE; break; case IEEE80211_WORK_ABORT: rma = WORK_ACT_TIMEOUT; break; case IEEE80211_WORK_DIRECT_PROBE: rma = ieee80211_direct_probe(wk); break; case IEEE80211_WORK_AUTH: rma = ieee80211_authenticate(wk); break; case IEEE80211_WORK_ASSOC: rma = ieee80211_associate(wk); break; case IEEE80211_WORK_REMAIN_ON_CHANNEL: rma = ieee80211_remain_on_channel_timeout(wk); break; case IEEE80211_WORK_OFFCHANNEL_TX: rma = ieee80211_offchannel_tx(wk); break; case IEEE80211_WORK_ASSOC_BEACON_WAIT: rma = ieee80211_assoc_beacon_wait(wk); break; } wk->started = started; switch (rma) { case WORK_ACT_NONE: /* might have changed the timeout */ run_again(local, wk->timeout); break; case WORK_ACT_TIMEOUT: list_del_rcu(&wk->list); synchronize_rcu(); list_add(&wk->list, &free_work); break; default: WARN(1, "unexpected: %d", rma); } } list_for_each_entry(wk, &local->work_list, list) { if (!wk->started) continue; if (wk->chan != local->tmp_channel) continue; if (!ieee80211_work_ct_coexists(wk->chan_type, local->tmp_channel_type)) continue; remain_off_channel = true; } if (!remain_off_channel && local->tmp_channel) { local->tmp_channel = NULL; /* If tmp_channel wasn't operating channel, then * we need to go back on-channel. * NOTE: If we can ever be here while scannning, * or if the hw_config() channel config logic changes, * then we may need to do a more thorough check to see if * we still need to do a hardware config. Currently, * we cannot be here while scanning, however. */ if (!ieee80211_cfg_on_oper_channel(local)) ieee80211_hw_config(local, 0); /* At the least, we need to disable offchannel_ps, * so just go ahead and run the entire offchannel * return logic here. We *could* skip enabling * beaconing if we were already on-oper-channel * as a future optimization. 
*/ ieee80211_offchannel_return(local, true); /* give connection some time to breathe */ run_again(local, jiffies + HZ/2); } if (list_empty(&local->work_list) && local->scan_req && !local->scanning) ieee80211_queue_delayed_work(&local->hw, &local->scan_work, round_jiffies_relative(0)); ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); list_for_each_entry_safe(wk, tmp, &free_work, list) { wk->done(wk, NULL); list_del(&wk->list); kfree(wk); } } void ieee80211_add_work(struct ieee80211_work *wk) { struct ieee80211_local *local; if (WARN_ON(!wk->chan)) return; if (WARN_ON(!wk->sdata)) return; if (WARN_ON(!wk->done)) return; if (WARN_ON(!ieee80211_sdata_running(wk->sdata))) return; wk->started = false; local = wk->sdata->local; mutex_lock(&local->mtx); list_add_tail(&wk->list, &local->work_list); mutex_unlock(&local->mtx); ieee80211_queue_work(&local->hw, &local->work_work); } void ieee80211_work_init(struct ieee80211_local *local) { INIT_LIST_HEAD(&local->work_list); setup_timer(&local->work_timer, ieee80211_work_timer, (unsigned long)local); INIT_WORK(&local->work_work, ieee80211_work_work); skb_queue_head_init(&local->work_skb_queue); } void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_work *wk; bool cleanup = false; mutex_lock(&local->mtx); list_for_each_entry(wk, &local->work_list, list) { if (wk->sdata != sdata) continue; cleanup = true; wk->type = IEEE80211_WORK_ABORT; wk->started = true; wk->timeout = jiffies; } mutex_unlock(&local->mtx); /* run cleanups etc. 
*/ if (cleanup) ieee80211_work_work(&local->work_work); mutex_lock(&local->mtx); list_for_each_entry(wk, &local->work_list, list) { if (wk->sdata != sdata) continue; WARN_ON(1); break; } mutex_unlock(&local->mtx); } ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct ieee80211_work *wk; u16 fc; if (skb->len < 24) return RX_DROP_MONITOR; mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); list_for_each_entry_rcu(wk, &local->work_list, list) { if (sdata != wk->sdata) continue; if (compare_ether_addr(wk->filter_ta, mgmt->sa)) continue; if (compare_ether_addr(wk->filter_ta, mgmt->bssid)) continue; switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_AUTH: case IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: case IEEE80211_STYPE_BEACON: skb_queue_tail(&local->work_skb_queue, skb); ieee80211_queue_work(&local->hw, &local->work_work); return RX_QUEUED; } } return RX_CONTINUE; } static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk, struct sk_buff *skb) { /* * We are done serving the remain-on-channel command. 
*/ cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk, wk->chan, wk->chan_type, GFP_KERNEL); return WORK_DONE_DESTROY; } int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, unsigned int duration, u64 *cookie) { struct ieee80211_work *wk; wk = kzalloc(sizeof(*wk), GFP_KERNEL); if (!wk) return -ENOMEM; wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL; wk->chan = chan; wk->chan_type = channel_type; wk->sdata = sdata; wk->done = ieee80211_remain_done; wk->remain.duration = duration; *cookie = (unsigned long) wk; ieee80211_add_work(wk); return 0; } int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata, u64 cookie) { struct ieee80211_local *local = sdata->local; struct ieee80211_work *wk, *tmp; bool found = false; mutex_lock(&local->mtx); list_for_each_entry_safe(wk, tmp, &local->work_list, list) { if ((unsigned long) wk == cookie) { wk->timeout = jiffies; found = true; break; } } mutex_unlock(&local->mtx); if (!found) return -ENOENT; ieee80211_queue_work(&local->hw, &local->work_work); return 0; }
gpl-2.0
malsony/linux
drivers/edac/i3200_edac.c
828
13164
/* * Intel 3200/3210 Memory Controller kernel module * Copyright (C) 2008-2009 Akamai Technologies, Inc. * Portions by Hitoshi Mitake <h.mitake@gmail.com>. * * This file may be distributed under the terms of the * GNU General Public License. */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include <linux/io.h> #include "edac_core.h" #include <asm-generic/io-64-nonatomic-lo-hi.h> #define I3200_REVISION "1.1" #define EDAC_MOD_STR "i3200_edac" #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 #define I3200_DIMMS 4 #define I3200_RANKS 8 #define I3200_RANKS_PER_CHANNEL 4 #define I3200_CHANNELS 2 /* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */ #define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ #define I3200_MCHBAR_HIGH 0x4c #define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ #define I3200_MMR_WINDOW_SIZE 16384 #define I3200_TOM 0xa0 /* Top of Memory (16b) * * 15:10 reserved * 9:0 total populated physical memory */ #define I3200_TOM_MASK 0x3ff /* bits 9:0 */ #define I3200_TOM_SHIFT 26 /* 64MiB grain */ #define I3200_ERRSTS 0xc8 /* Error Status Register (16b) * * 15 reserved * 14 Isochronous TBWRR Run Behind FIFO Full * (ITCV) * 13 Isochronous TBWRR Run Behind FIFO Put * (ITSTV) * 12 reserved * 11 MCH Thermal Sensor Event * for SMI/SCI/SERR (GTSE) * 10 reserved * 9 LOCK to non-DRAM Memory Flag (LCKF) * 8 reserved * 7 DRAM Throttle Flag (DTF) * 6:2 reserved * 1 Multi-bit DRAM ECC Error Flag (DMERR) * 0 Single-bit DRAM ECC Error Flag (DSERR) */ #define I3200_ERRSTS_UE 0x0002 #define I3200_ERRSTS_CE 0x0001 #define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE) /* Intel MMIO register space - device 0 function 0 - MMR space */ #define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4) * * 15:10 reserved * 9:0 Channel 0 DRAM Rank Boundary Address */ #define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */ #define I3200_DRB_MASK 
0x3ff /* bits 9:0 */ #define I3200_DRB_SHIFT 26 /* 64MiB grain */ #define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b) * * 63:48 Error Column Address (ERRCOL) * 47:32 Error Row Address (ERRROW) * 31:29 Error Bank Address (ERRBANK) * 28:27 Error Rank Address (ERRRANK) * 26:24 reserved * 23:16 Error Syndrome (ERRSYND) * 15: 2 reserved * 1 Multiple Bit Error Status (MERRSTS) * 0 Correctable Error Status (CERRSTS) */ #define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */ #define I3200_ECCERRLOG_CE 0x1 #define I3200_ECCERRLOG_UE 0x2 #define I3200_ECCERRLOG_RANK_BITS 0x18000000 #define I3200_ECCERRLOG_RANK_SHIFT 27 #define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000 #define I3200_ECCERRLOG_SYNDROME_SHIFT 16 #define I3200_CAPID0 0xe0 /* P.95 of spec for details */ struct i3200_priv { void __iomem *window; }; static int nr_channels; static int how_many_channels(struct pci_dev *pdev) { int n_channels; unsigned char capid0_8b; /* 8th byte of CAPID0 */ pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ edac_dbg(0, "In single channel mode\n"); n_channels = 1; } else { edac_dbg(0, "In dual channel mode\n"); n_channels = 2; } if (capid0_8b & 0x10) /* check if both channels are filled */ edac_dbg(0, "2 DIMMS per channel disabled\n"); else edac_dbg(0, "2 DIMMS per channel enabled\n"); return n_channels; } static unsigned long eccerrlog_syndrome(u64 log) { return (log & I3200_ECCERRLOG_SYNDROME_BITS) >> I3200_ECCERRLOG_SYNDROME_SHIFT; } static int eccerrlog_row(int channel, u64 log) { u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >> I3200_ECCERRLOG_RANK_SHIFT); return rank | (channel * I3200_RANKS_PER_CHANNEL); } enum i3200_chips { I3200 = 0, }; struct i3200_dev_info { const char *ctl_name; }; struct i3200_error_info { u16 errsts; u16 errsts2; u64 eccerrlog[I3200_CHANNELS]; }; static const struct i3200_dev_info i3200_devs[] = { [I3200] = { .ctl_name = "i3200" }, }; static struct pci_dev *mci_pdev; 
static int i3200_registered = 1; static void i3200_clear_error_info(struct mem_ctl_info *mci) { struct pci_dev *pdev; pdev = to_pci_dev(mci->pdev); /* * Clear any error bits. * (Yes, we really clear bits by writing 1 to them.) */ pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS, I3200_ERRSTS_BITS); } static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci, struct i3200_error_info *info) { struct pci_dev *pdev; struct i3200_priv *priv = mci->pvt_info; void __iomem *window = priv->window; pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the * registers at once and the registers can transition from CE being * overwritten by UE. */ pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts); if (!(info->errsts & I3200_ERRSTS_BITS)) return; info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); if (nr_channels == 2) info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2); /* * If the error is the same for both reads then the first set * of reads is valid. If there is a change then there is a CE * with no info and the second set of reads is valid and * should be UE info. 
*/ if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); if (nr_channels == 2) info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); } i3200_clear_error_info(mci); } static void i3200_process_error_info(struct mem_ctl_info *mci, struct i3200_error_info *info) { int channel; u64 log; if (!(info->errsts & I3200_ERRSTS_BITS)) return; if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, "UE overwrote CE", ""); info->errsts = info->errsts2; } for (channel = 0; channel < nr_channels; channel++) { log = info->eccerrlog[channel]; if (log & I3200_ECCERRLOG_UE) { edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, eccerrlog_row(channel, log), -1, -1, "i3000 UE", ""); } else if (log & I3200_ECCERRLOG_CE) { edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, eccerrlog_syndrome(log), eccerrlog_row(channel, log), -1, -1, "i3000 CE", ""); } } } static void i3200_check(struct mem_ctl_info *mci) { struct i3200_error_info info; edac_dbg(1, "MC%d\n", mci->mc_idx); i3200_get_and_clear_error_info(mci, &info); i3200_process_error_info(mci, &info); } static void __iomem *i3200_map_mchbar(struct pci_dev *pdev) { union { u64 mchbar; struct { u32 mchbar_low; u32 mchbar_high; }; } u; void __iomem *window; pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low); pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high); u.mchbar &= I3200_MCHBAR_MASK; if (u.mchbar != (resource_size_t)u.mchbar) { printk(KERN_ERR "i3200: mmio space beyond accessible range (0x%llx)\n", (unsigned long long)u.mchbar); return NULL; } window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE); if (!window) printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); return window; } static void i3200_get_drbs(void __iomem *window, u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) { int i; for (i = 0; i < 
I3200_RANKS_PER_CHANNEL; i++) { drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK; drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK; edac_dbg(0, "drb[0][%d] = %d, drb[1][%d] = %d\n", i, drbs[0][i], i, drbs[1][i]); } } static bool i3200_is_stacked(struct pci_dev *pdev, u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) { u16 tom; pci_read_config_word(pdev, I3200_TOM, &tom); tom &= I3200_TOM_MASK; return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom; } static unsigned long drb_to_nr_pages( u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked, int channel, int rank) { int n; n = drbs[channel][rank]; if (!n) return 0; if (rank > 0) n -= drbs[channel][rank - 1]; if (stacked && (channel == 1) && drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1]) n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1]; n <<= (I3200_DRB_SHIFT - PAGE_SHIFT); return n; } static int i3200_probe1(struct pci_dev *pdev, int dev_idx) { int rc; int i, j; struct mem_ctl_info *mci = NULL; struct edac_mc_layer layers[2]; u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; bool stacked; void __iomem *window; struct i3200_priv *priv; edac_dbg(0, "MC:\n"); window = i3200_map_mchbar(pdev); if (!window) return -ENODEV; i3200_get_drbs(window, drbs); nr_channels = how_many_channels(pdev); layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = I3200_DIMMS; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = nr_channels; layers[1].is_virt_csrow = false; mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct i3200_priv)); if (!mci) return -ENOMEM; edac_dbg(3, "MC: init mci\n"); mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I3200_REVISION; mci->ctl_name = i3200_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = i3200_check; mci->ctl_page_to_phys = NULL; 
priv = mci->pvt_info; priv->window = window; stacked = i3200_is_stacked(pdev, drbs); /* * The dram rank boundary (DRB) reg values are boundary addresses * for each DRAM rank with a granularity of 64MB. DRB regs are * cumulative; the last one will contain the total memory * contained in all ranks. */ for (i = 0; i < I3200_DIMMS; i++) { unsigned long nr_pages; for (j = 0; j < nr_channels; j++) { struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); nr_pages = drb_to_nr_pages(drbs, stacked, j, i); if (nr_pages == 0) continue; edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j, stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages)); dimm->nr_pages = nr_pages; dimm->grain = nr_pages << PAGE_SHIFT; dimm->mtype = MEM_DDR2; dimm->dtype = DEV_UNKNOWN; dimm->edac_mode = EDAC_UNKNOWN; } } i3200_clear_error_info(mci); rc = -ENODEV; if (edac_mc_add_mc(mci)) { edac_dbg(3, "MC: failed edac_mc_add_mc()\n"); goto fail; } /* get this far and it's successful */ edac_dbg(3, "MC: success\n"); return 0; fail: iounmap(window); if (mci) edac_mc_free(mci); return rc; } static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; rc = i3200_probe1(pdev, ent->driver_data); if (!mci_pdev) mci_pdev = pci_dev_get(pdev); return rc; } static void i3200_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i3200_priv *priv; edac_dbg(0, "\n"); mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; priv = mci->pvt_info; iounmap(priv->window); edac_mc_free(mci); pci_disable_device(pdev); } static const struct pci_device_id i3200_pci_tbl[] = { { PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I3200}, { 0, } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i3200_pci_tbl); static struct pci_driver i3200_driver = { .name = EDAC_MOD_STR, .probe = i3200_init_one, .remove = i3200_remove_one, .id_table = i3200_pci_tbl, }; static int __init i3200_init(void) { int pci_rc; edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i3200_driver); if (pci_rc < 0) goto fail0; if (!mci_pdev) { i3200_registered = 0; mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_3200_HB, NULL); if (!mci_pdev) { edac_dbg(0, "i3200 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); if (pci_rc < 0) { edac_dbg(0, "i3200 init fail\n"); pci_rc = -ENODEV; goto fail1; } } return 0; fail1: pci_unregister_driver(&i3200_driver); fail0: pci_dev_put(mci_pdev); return pci_rc; } static void __exit i3200_exit(void) { edac_dbg(3, "MC:\n"); pci_unregister_driver(&i3200_driver); if (!i3200_registered) { i3200_remove_one(mci_pdev); pci_dev_put(mci_pdev); } } module_init(i3200_init); module_exit(i3200_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Akamai Technologies, Inc."); MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
finch0219/linux
net/netfilter/nf_conntrack_acct.c
1084
3103
/* Accouting handling for netfilter. */ /* * (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netfilter.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/export.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_acct.h> static bool nf_ct_acct __read_mostly; module_param_named(acct, nf_ct_acct, bool, 0644); MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting."); #ifdef CONFIG_SYSCTL static struct ctl_table acct_sysctl_table[] = { { .procname = "nf_conntrack_acct", .data = &init_net.ct.sysctl_acct, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; #endif /* CONFIG_SYSCTL */ unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) { struct nf_conn_acct *acct; struct nf_conn_counter *counter; acct = nf_conn_acct_find(ct); if (!acct) return 0; counter = acct->counter; seq_printf(s, "packets=%llu bytes=%llu ", (unsigned long long)atomic64_read(&counter[dir].packets), (unsigned long long)atomic64_read(&counter[dir].bytes)); return 0; }; EXPORT_SYMBOL_GPL(seq_print_acct); static struct nf_ct_ext_type acct_extend __read_mostly = { .len = sizeof(struct nf_conn_acct), .align = __alignof__(struct nf_conn_acct), .id = NF_CT_EXT_ACCT, }; #ifdef CONFIG_SYSCTL static int nf_conntrack_acct_init_sysctl(struct net *net) { struct ctl_table *table; table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table), GFP_KERNEL); if (!table) goto out; table[0].data = &net->ct.sysctl_acct; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) table[0].procname = NULL; net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", table); if 
(!net->ct.acct_sysctl_header) { printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n"); goto out_register; } return 0; out_register: kfree(table); out: return -ENOMEM; } static void nf_conntrack_acct_fini_sysctl(struct net *net) { struct ctl_table *table; table = net->ct.acct_sysctl_header->ctl_table_arg; unregister_net_sysctl_table(net->ct.acct_sysctl_header); kfree(table); } #else static int nf_conntrack_acct_init_sysctl(struct net *net) { return 0; } static void nf_conntrack_acct_fini_sysctl(struct net *net) { } #endif int nf_conntrack_acct_pernet_init(struct net *net) { net->ct.sysctl_acct = nf_ct_acct; return nf_conntrack_acct_init_sysctl(net); } void nf_conntrack_acct_pernet_fini(struct net *net) { nf_conntrack_acct_fini_sysctl(net); } int nf_conntrack_acct_init(void) { int ret = nf_ct_extend_register(&acct_extend); if (ret < 0) pr_err("nf_conntrack_acct: Unable to register extension\n"); return ret; } void nf_conntrack_acct_fini(void) { nf_ct_extend_unregister(&acct_extend); }
gpl-2.0
ghbhaha/AK-OnePone
arch/x86/kernel/cpu/mshyperv.c
1084
2155
/* * HyperV Detection code. * * Copyright (C) 2010, Novell, Inc. * Author : K. Y. Srinivasan <ksrinivasan@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * */ #include <linux/types.h> #include <linux/time.h> #include <linux/clocksource.h> #include <linux/module.h> #include <asm/processor.h> #include <asm/hypervisor.h> #include <asm/hyperv.h> #include <asm/mshyperv.h> struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); static bool __init ms_hyperv_platform(void) { u32 eax; u32 hyp_signature[3]; if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) return false; cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); return eax >= HYPERV_CPUID_MIN && eax <= HYPERV_CPUID_MAX && !memcmp("Microsoft Hv", hyp_signature, 12); } static cycle_t read_hv_clock(struct clocksource *arg) { cycle_t current_tick; /* * Read the partition counter to get the current tick count. This count * is set to 0 when the partition is created and is incremented in * 100 nanosecond units. 
*/ rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick); return current_tick; } static struct clocksource hyperv_cs = { .name = "hyperv_clocksource", .rating = 400, /* use this when running on Hyperv*/ .read = read_hv_clock, .mask = CLOCKSOURCE_MASK(64), }; static void __init ms_hyperv_init_platform(void) { /* * Extract the features and hints */ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES); ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO); printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", ms_hyperv.features, ms_hyperv.hints); if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { .name = "Microsoft HyperV", .detect = ms_hyperv_platform, .init_platform = ms_hyperv_init_platform, }; EXPORT_SYMBOL(x86_hyper_ms_hyperv);
gpl-2.0
wulsic/android_kernel_samsung_nevispcm11
drivers/gpu/drm/nouveau/nv50_evo.c
2108
11061
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" #include "nouveau_ramht.h" #include "nv50_display.h" static void nv50_evo_channel_del(struct nouveau_channel **pevo) { struct nouveau_channel *evo = *pevo; if (!evo) return; *pevo = NULL; nouveau_gpuobj_channel_takedown(evo); nouveau_bo_unmap(evo->pushbuf_bo); nouveau_bo_ref(NULL, &evo->pushbuf_bo); if (evo->user) iounmap(evo->user); kfree(evo); } void nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size) { struct drm_nouveau_private *dev_priv = obj->dev->dev_private; u32 flags5; if (dev_priv->chipset < 0xc0) { /* not supported on 0x50, specified in format mthd */ if (dev_priv->chipset == 0x50) memtype = 0; flags5 = 0x00010000; } else { if (memtype & 0x80000000) flags5 = 0x00000000; /* large pages */ else flags5 = 0x00020000; } nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM, NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0); nv_wo32(obj, 0x14, flags5); dev_priv->engine.instmem.flush(obj->dev); } int nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, u64 base, u64 size, struct nouveau_gpuobj **pobj) { struct nv50_display *disp = nv50_display(evo->dev); struct nouveau_gpuobj *obj = NULL; int ret; ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj); if (ret) return ret; obj->engine = NVOBJ_ENGINE_DISPLAY; nv50_evo_dmaobj_init(obj, memtype, base, size); ret = nouveau_ramht_insert(evo, handle, obj); if (ret) goto out; if (pobj) nouveau_gpuobj_ref(obj, pobj); out: nouveau_gpuobj_ref(NULL, &obj); return ret; } static int nv50_evo_channel_new(struct drm_device *dev, int chid, struct nouveau_channel **pevo) { struct nv50_display *disp = nv50_display(dev); struct nouveau_channel *evo; int ret; evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); if (!evo) return -ENOMEM; *pevo = evo; evo->id = chid; evo->dev = dev; evo->user_get = 4; evo->user_put = 0; ret = nouveau_bo_new(dev, NULL, 4096, 0, 
TTM_PL_FLAG_VRAM, 0, 0, &evo->pushbuf_bo); if (ret == 0) ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); nv50_evo_channel_del(pevo); return ret; } ret = nouveau_bo_map(evo->pushbuf_bo); if (ret) { NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); nv50_evo_channel_del(pevo); return ret; } evo->user = ioremap(pci_resource_start(dev->pdev, 0) + NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); if (!evo->user) { NV_ERROR(dev, "Error mapping EVO control regs.\n"); nv50_evo_channel_del(pevo); return -ENOMEM; } /* bind primary evo channel's ramht to the channel */ if (disp->master && evo != disp->master) nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL); return 0; } static int nv50_evo_channel_init(struct nouveau_channel *evo) { struct drm_device *dev = evo->dev; int id = evo->id, ret, i; u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; u32 tmp; tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); if ((tmp & 0x009f0000) == 0x00020000) nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); if ((tmp & 0x003f0000) == 0x00030000) nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); /* initialise fifo */ nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | NV50_PDISPLAY_EVO_DMA_CB_VALID); nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); return -EBUSY; } /* enable error reporting on the channel */ 
nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); evo->dma.max = (4096/4) - 2; evo->dma.max &= ~7; evo->dma.put = 0; evo->dma.cur = evo->dma.put; evo->dma.free = evo->dma.max - evo->dma.cur; ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); if (ret) return ret; for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) OUT_RING(evo, 0); return 0; } static void nv50_evo_channel_fini(struct nouveau_channel *evo) { struct drm_device *dev = evo->dev; int id = evo->id; nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); } } static void nv50_evo_destroy(struct drm_device *dev) { struct nv50_display *disp = nv50_display(dev); int i; for (i = 0; i < 2; i++) { if (disp->crtc[i].sem.bo) { nouveau_bo_unmap(disp->crtc[i].sem.bo); nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo); } nv50_evo_channel_del(&disp->crtc[i].sync); } nouveau_gpuobj_ref(NULL, &disp->ntfy); nv50_evo_channel_del(&disp->master); } static int nv50_evo_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_display *disp = nv50_display(dev); struct nouveau_gpuobj *ramht = NULL; struct nouveau_channel *evo; int ret, i, j; /* create primary evo channel, the one we use for modesetting * purporses */ ret = nv50_evo_channel_new(dev, 0, &disp->master); if (ret) return ret; evo = disp->master; /* setup object management on it, any other evo channel will * use this also as there's no per-channel support on the * hardware */ ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); if (ret) { NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); goto err; } ret = drm_mm_init(&evo->ramin_heap, 0, 32768); if (ret) 
{ NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); goto err; } ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); if (ret) { NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); goto err; } ret = nouveau_ramht_new(dev, ramht, &evo->ramht); nouveau_gpuobj_ref(NULL, &ramht); if (ret) goto err; /* not sure exactly what this is.. * * the first dword of the structure is used by nvidia to wait on * full completion of an EVO "update" command. * * method 0x8c on the master evo channel will fill a lot more of * this structure with some undefined info */ ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0, NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, disp->ntfy->vinst, disp->ntfy->size, NULL); if (ret) goto err; /* create some default objects for the scanout memtypes we support */ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000, 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000, 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | (dev_priv->chipset < 0xc0 ? 
0x7000 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; /* create "display sync" channels and other structures we need * to implement page flipping */ for (i = 0; i < 2; i++) { struct nv50_display_crtc *dispc = &disp->crtc[i]; u64 offset; ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync); if (ret) goto err; ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM, 0, 0x0000, &dispc->sem.bo); if (!ret) { offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT; ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM); if (!ret) ret = nouveau_bo_map(dispc->sem.bo); if (ret) nouveau_bo_ref(NULL, &dispc->sem.bo); } if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000, offset, 4096, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000, 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00), 0, dev_priv->vram_size, NULL); if (ret) goto err; for (j = 0; j < 4096; j += 4) nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000); dispc->sem.offset = 0; } return 0; err: nv50_evo_destroy(dev); return ret; } int nv50_evo_init(struct drm_device *dev) { struct nv50_display *disp = nv50_display(dev); int ret, i; if (!disp->master) { ret = nv50_evo_create(dev); if (ret) return ret; } ret = nv50_evo_channel_init(disp->master); if (ret) return ret; for (i = 0; i < 2; i++) { ret = nv50_evo_channel_init(disp->crtc[i].sync); if (ret) return ret; } return 0; } void nv50_evo_fini(struct drm_device *dev) { struct nv50_display *disp = nv50_display(dev); int i; for (i = 0; i < 2; i++) { if (disp->crtc[i].sync) nv50_evo_channel_fini(disp->crtc[i].sync); } if (disp->master) nv50_evo_channel_fini(disp->master); nv50_evo_destroy(dev); }
gpl-2.0
bigbiff/android_kernel_samsung_sm-p605
scripts/mod/file2alias.c
2364
34685
/* Simple code to turn various tables in an ELF file into alias definitions. * This deals with kernel datastructures where they should be * dealt with: in the kernel source. * * Copyright 2002-2003 Rusty Russell, IBM Corporation * 2003 Kai Germaschewski * * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ #include "modpost.h" /* We use the ELF typedefs for kernel_ulong_t but bite the bullet and * use either stdint.h or inttypes.h for the rest. */ #if KERNEL_ELFCLASS == ELFCLASS32 typedef Elf32_Addr kernel_ulong_t; #define BITS_PER_LONG 32 #else typedef Elf64_Addr kernel_ulong_t; #define BITS_PER_LONG 64 #endif #ifdef __sun__ #include <inttypes.h> #else #include <stdint.h> #endif #include <ctype.h> #include <stdbool.h> typedef uint32_t __u32; typedef uint16_t __u16; typedef unsigned char __u8; /* Big exception to the "don't include kernel headers into userspace, which * even potentially has different endianness and word sizes, since * we handle those differences explicitly below */ #include "../../include/linux/mod_devicetable.h" /* This array collects all instances that use the generic do_table */ struct devtable { const char *device_id; /* name of table, __mod_<name>_device_table. */ unsigned long id_size; void *function; }; #define ___cat(a,b) a ## b #define __cat(a,b) ___cat(a,b) /* we need some special handling for this host tool running eventually on * Darwin. The Mach-O section handling is a bit different than ELF section * handling. 
The differnces in detail are: * a) we have segments which have sections * b) we need a API call to get the respective section symbols */ #if defined(__MACH__) #include <mach-o/getsect.h> #define INIT_SECTION(name) do { \ unsigned long name ## _len; \ char *__cat(pstart_,name) = getsectdata("__TEXT", \ #name, &__cat(name,_len)); \ char *__cat(pstop_,name) = __cat(pstart_,name) + \ __cat(name, _len); \ __cat(__start_,name) = (void *)__cat(pstart_,name); \ __cat(__stop_,name) = (void *)__cat(pstop_,name); \ } while (0) #define SECTION(name) __attribute__((section("__TEXT, " #name))) struct devtable **__start___devtable, **__stop___devtable; #else #define INIT_SECTION(name) /* no-op for ELF */ #define SECTION(name) __attribute__((section(#name))) /* We construct a table of pointers in an ELF section (pointers generally * go unpadded by gcc). ld creates boundary syms for us. */ extern struct devtable *__start___devtable[], *__stop___devtable[]; #endif /* __MACH__ */ #if __GNUC__ == 3 && __GNUC_MINOR__ < 3 # define __used __attribute__((__unused__)) #else # define __used __attribute__((__used__)) #endif /* Add a table entry. We test function type matches while we're here. */ #define ADD_TO_DEVTABLE(device_id, type, function) \ static struct devtable __cat(devtable,__LINE__) = { \ device_id + 0*sizeof((function)((const char *)NULL, \ (type *)NULL, \ (char *)NULL)), \ sizeof(type), (function) }; \ static struct devtable *SECTION(__devtable) __used \ __cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__) #define ADD(str, sep, cond, field) \ do { \ strcat(str, sep); \ if (cond) \ sprintf(str + strlen(str), \ sizeof(field) == 1 ? "%02X" : \ sizeof(field) == 2 ? "%04X" : \ sizeof(field) == 4 ? 
"%08X" : "", \ field); \ else \ sprintf(str + strlen(str), "*"); \ } while(0) /* Always end in a wildcard, for future extension */ static inline void add_wildcard(char *str) { int len = strlen(str); if (str[len - 1] != '*') strcat(str + len, "*"); } unsigned int cross_build = 0; /** * Check that sizeof(device_id type) are consistent with size of section * in .o file. If in-consistent then userspace and kernel does not agree * on actual size which is a bug. * Also verify that the final entry in the table is all zeros. * Ignore both checks if build host differ from target host and size differs. **/ static void device_id_check(const char *modname, const char *device_id, unsigned long size, unsigned long id_size, void *symval) { int i; if (size % id_size || size < id_size) { if (cross_build != 0) return; fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo " "of the size of section __mod_%s_device_table=%lu.\n" "Fix definition of struct %s_device_id " "in mod_devicetable.h\n", modname, device_id, id_size, device_id, size, device_id); } /* Verify last one is a terminator */ for (i = 0; i < id_size; i++ ) { if (*(uint8_t*)(symval+size-id_size+i)) { fprintf(stderr,"%s: struct %s_device_id is %lu bytes. 
" "The last of %lu is:\n", modname, device_id, id_size, size / id_size); for (i = 0; i < id_size; i++ ) fprintf(stderr,"0x%02x ", *(uint8_t*)(symval+size-id_size+i) ); fprintf(stderr,"\n"); fatal("%s: struct %s_device_id is not terminated " "with a NULL entry!\n", modname, device_id); } } } /* USB is special because the bcdDevice can be matched against a numeric range */ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */ static void do_usb_entry(struct usb_device_id *id, unsigned int bcdDevice_initial, int bcdDevice_initial_digits, unsigned char range_lo, unsigned char range_hi, unsigned char max, struct module *mod) { char alias[500]; strcpy(alias, "usb:"); ADD(alias, "v", id->match_flags&USB_DEVICE_ID_MATCH_VENDOR, id->idVendor); ADD(alias, "p", id->match_flags&USB_DEVICE_ID_MATCH_PRODUCT, id->idProduct); strcat(alias, "d"); if (bcdDevice_initial_digits) sprintf(alias + strlen(alias), "%0*X", bcdDevice_initial_digits, bcdDevice_initial); if (range_lo == range_hi) sprintf(alias + strlen(alias), "%X", range_lo); else if (range_lo > 0 || range_hi < max) { if (range_lo > 0x9 || range_hi < 0xA) sprintf(alias + strlen(alias), "[%X-%X]", range_lo, range_hi); else { sprintf(alias + strlen(alias), range_lo < 0x9 ? "[%X-9" : "[%X", range_lo); sprintf(alias + strlen(alias), range_hi > 0xA ? 
"a-%X]" : "%X]", range_lo); } } if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1)) strcat(alias, "*"); ADD(alias, "dc", id->match_flags&USB_DEVICE_ID_MATCH_DEV_CLASS, id->bDeviceClass); ADD(alias, "dsc", id->match_flags&USB_DEVICE_ID_MATCH_DEV_SUBCLASS, id->bDeviceSubClass); ADD(alias, "dp", id->match_flags&USB_DEVICE_ID_MATCH_DEV_PROTOCOL, id->bDeviceProtocol); ADD(alias, "ic", id->match_flags&USB_DEVICE_ID_MATCH_INT_CLASS, id->bInterfaceClass); ADD(alias, "isc", id->match_flags&USB_DEVICE_ID_MATCH_INT_SUBCLASS, id->bInterfaceSubClass); ADD(alias, "ip", id->match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL, id->bInterfaceProtocol); ADD(alias, "in", id->match_flags&USB_DEVICE_ID_MATCH_INT_NUMBER, id->bInterfaceNumber); add_wildcard(alias); buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); } /* Handles increment/decrement of BCD formatted integers */ /* Returns the previous value, so it works like i++ or i-- */ static unsigned int incbcd(unsigned int *bcd, int inc, unsigned char max, size_t chars) { unsigned int init = *bcd, i, j; unsigned long long c, dec = 0; /* If bcd is not in BCD format, just increment */ if (max > 0x9) { *bcd += inc; return init; } /* Convert BCD to Decimal */ for (i=0 ; i < chars ; i++) { c = (*bcd >> (i << 2)) & 0xf; c = c > 9 ? 9 : c; /* force to bcd just in case */ for (j=0 ; j < i ; j++) c = c * 10; dec += c; } /* Do our increment/decrement */ dec += inc; *bcd = 0; /* Convert back to BCD */ for (i=0 ; i < chars ; i++) { for (c=1,j=0 ; j < i ; j++) c = c * 10; c = (dec / c) % 10; *bcd += c << (i << 2); } return init; } static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod) { unsigned int devlo, devhi; unsigned char chi, clo, max; int ndigits; id->match_flags = TO_NATIVE(id->match_flags); id->idVendor = TO_NATIVE(id->idVendor); id->idProduct = TO_NATIVE(id->idProduct); devlo = id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO ? 
TO_NATIVE(id->bcdDevice_lo) : 0x0U; devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ? TO_NATIVE(id->bcdDevice_hi) : ~0x0U; /* Figure out if this entry is in bcd or hex format */ max = 0x9; /* Default to decimal format */ for (ndigits = 0 ; ndigits < sizeof(id->bcdDevice_lo) * 2 ; ndigits++) { clo = (devlo >> (ndigits << 2)) & 0xf; chi = ((devhi > 0x9999 ? 0x9999 : devhi) >> (ndigits << 2)) & 0xf; if (clo > max || chi > max) { max = 0xf; break; } } /* * Some modules (visor) have empty slots as placeholder for * run-time specification that results in catch-all alias */ if (!(id->idVendor | id->idProduct | id->bDeviceClass | id->bInterfaceClass)) return; /* Convert numeric bcdDevice range into fnmatch-able pattern(s) */ for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) { clo = devlo & 0xf; chi = devhi & 0xf; if (chi > max) /* If we are in bcd mode, truncate if necessary */ chi = max; devlo >>= 4; devhi >>= 4; if (devlo == devhi || !ndigits) { do_usb_entry(id, devlo, ndigits, clo, chi, max, mod); break; } if (clo > 0x0) do_usb_entry(id, incbcd(&devlo, 1, max, sizeof(id->bcdDevice_lo) * 2), ndigits, clo, max, max, mod); if (chi < max) do_usb_entry(id, incbcd(&devhi, -1, max, sizeof(id->bcdDevice_lo) * 2), ndigits, 0x0, chi, max, mod); } } static void do_usb_table(void *symval, unsigned long size, struct module *mod) { unsigned int i; const unsigned long id_size = sizeof(struct usb_device_id); device_id_check(mod->name, "usb", size, id_size, symval); /* Leave last one: it's the terminator. 
*/ size -= id_size; for (i = 0; i < size; i += id_size) do_usb_entry_multi(symval + i, mod); } /* Looks like: hid:bNvNpN */ static int do_hid_entry(const char *filename, struct hid_device_id *id, char *alias) { id->bus = TO_NATIVE(id->bus); id->vendor = TO_NATIVE(id->vendor); id->product = TO_NATIVE(id->product); sprintf(alias, "hid:b%04X", id->bus); ADD(alias, "v", id->vendor != HID_ANY_ID, id->vendor); ADD(alias, "p", id->product != HID_ANY_ID, id->product); return 1; } ADD_TO_DEVTABLE("hid", struct hid_device_id, do_hid_entry); /* Looks like: ieee1394:venNmoNspNverN */ static int do_ieee1394_entry(const char *filename, struct ieee1394_device_id *id, char *alias) { id->match_flags = TO_NATIVE(id->match_flags); id->vendor_id = TO_NATIVE(id->vendor_id); id->model_id = TO_NATIVE(id->model_id); id->specifier_id = TO_NATIVE(id->specifier_id); id->version = TO_NATIVE(id->version); strcpy(alias, "ieee1394:"); ADD(alias, "ven", id->match_flags & IEEE1394_MATCH_VENDOR_ID, id->vendor_id); ADD(alias, "mo", id->match_flags & IEEE1394_MATCH_MODEL_ID, id->model_id); ADD(alias, "sp", id->match_flags & IEEE1394_MATCH_SPECIFIER_ID, id->specifier_id); ADD(alias, "ver", id->match_flags & IEEE1394_MATCH_VERSION, id->version); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("ieee1394", struct ieee1394_device_id, do_ieee1394_entry); /* Looks like: pci:vNdNsvNsdNbcNscNiN. */ static int do_pci_entry(const char *filename, struct pci_device_id *id, char *alias) { /* Class field can be divided into these three. 
*/ unsigned char baseclass, subclass, interface, baseclass_mask, subclass_mask, interface_mask; id->vendor = TO_NATIVE(id->vendor); id->device = TO_NATIVE(id->device); id->subvendor = TO_NATIVE(id->subvendor); id->subdevice = TO_NATIVE(id->subdevice); id->class = TO_NATIVE(id->class); id->class_mask = TO_NATIVE(id->class_mask); strcpy(alias, "pci:"); ADD(alias, "v", id->vendor != PCI_ANY_ID, id->vendor); ADD(alias, "d", id->device != PCI_ANY_ID, id->device); ADD(alias, "sv", id->subvendor != PCI_ANY_ID, id->subvendor); ADD(alias, "sd", id->subdevice != PCI_ANY_ID, id->subdevice); baseclass = (id->class) >> 16; baseclass_mask = (id->class_mask) >> 16; subclass = (id->class) >> 8; subclass_mask = (id->class_mask) >> 8; interface = id->class; interface_mask = id->class_mask; if ((baseclass_mask != 0 && baseclass_mask != 0xFF) || (subclass_mask != 0 && subclass_mask != 0xFF) || (interface_mask != 0 && interface_mask != 0xFF)) { warn("Can't handle masks in %s:%04X\n", filename, id->class_mask); return 0; } ADD(alias, "bc", baseclass_mask == 0xFF, baseclass); ADD(alias, "sc", subclass_mask == 0xFF, subclass); ADD(alias, "i", interface_mask == 0xFF, interface); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("pci", struct pci_device_id, do_pci_entry); /* looks like: "ccw:tNmNdtNdmN" */ static int do_ccw_entry(const char *filename, struct ccw_device_id *id, char *alias) { id->match_flags = TO_NATIVE(id->match_flags); id->cu_type = TO_NATIVE(id->cu_type); id->cu_model = TO_NATIVE(id->cu_model); id->dev_type = TO_NATIVE(id->dev_type); id->dev_model = TO_NATIVE(id->dev_model); strcpy(alias, "ccw:"); ADD(alias, "t", id->match_flags&CCW_DEVICE_ID_MATCH_CU_TYPE, id->cu_type); ADD(alias, "m", id->match_flags&CCW_DEVICE_ID_MATCH_CU_MODEL, id->cu_model); ADD(alias, "dt", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE, id->dev_type); ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL, id->dev_model); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("ccw", struct 
ccw_device_id, do_ccw_entry); /* looks like: "ap:tN" */ static int do_ap_entry(const char *filename, struct ap_device_id *id, char *alias) { sprintf(alias, "ap:t%02X*", id->dev_type); return 1; } ADD_TO_DEVTABLE("ap", struct ap_device_id, do_ap_entry); /* looks like: "css:tN" */ static int do_css_entry(const char *filename, struct css_device_id *id, char *alias) { sprintf(alias, "css:t%01X", id->type); return 1; } ADD_TO_DEVTABLE("css", struct css_device_id, do_css_entry); /* Looks like: "serio:tyNprNidNexN" */ static int do_serio_entry(const char *filename, struct serio_device_id *id, char *alias) { id->type = TO_NATIVE(id->type); id->proto = TO_NATIVE(id->proto); id->id = TO_NATIVE(id->id); id->extra = TO_NATIVE(id->extra); strcpy(alias, "serio:"); ADD(alias, "ty", id->type != SERIO_ANY, id->type); ADD(alias, "pr", id->proto != SERIO_ANY, id->proto); ADD(alias, "id", id->id != SERIO_ANY, id->id); ADD(alias, "ex", id->extra != SERIO_ANY, id->extra); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("serio", struct serio_device_id, do_serio_entry); /* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */ static int do_acpi_entry(const char *filename, struct acpi_device_id *id, char *alias) { sprintf(alias, "acpi*:%s:*", id->id); return 1; } ADD_TO_DEVTABLE("acpi", struct acpi_device_id, do_acpi_entry); /* looks like: "pnp:dD" */ static void do_pnp_device_entry(void *symval, unsigned long size, struct module *mod) { const unsigned long id_size = sizeof(struct pnp_device_id); const unsigned int count = (size / id_size)-1; const struct pnp_device_id *devs = symval; unsigned int i; device_id_check(mod->name, "pnp", size, id_size, symval); for (i = 0; i < count; i++) { const char *id = (char *)devs[i].id; char acpi_id[sizeof(devs[0].id)]; int j; buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"pnp:d%s*\");\n", id); /* fix broken pnp bus lowercasing */ for (j = 0; j < sizeof(acpi_id); j++) acpi_id[j] = toupper(id[j]); buf_printf(&mod->dev_table_buf, 
"MODULE_ALIAS(\"acpi*:%s:*\");\n", acpi_id); } } /* looks like: "pnp:dD" for every device of the card */ static void do_pnp_card_entries(void *symval, unsigned long size, struct module *mod) { const unsigned long id_size = sizeof(struct pnp_card_device_id); const unsigned int count = (size / id_size)-1; const struct pnp_card_device_id *cards = symval; unsigned int i; device_id_check(mod->name, "pnp", size, id_size, symval); for (i = 0; i < count; i++) { unsigned int j; const struct pnp_card_device_id *card = &cards[i]; for (j = 0; j < PNP_MAX_DEVICES; j++) { const char *id = (char *)card->devs[j].id; int i2, j2; int dup = 0; if (!id[0]) break; /* find duplicate, already added value */ for (i2 = 0; i2 < i && !dup; i2++) { const struct pnp_card_device_id *card2 = &cards[i2]; for (j2 = 0; j2 < PNP_MAX_DEVICES; j2++) { const char *id2 = (char *)card2->devs[j2].id; if (!id2[0]) break; if (!strcmp(id, id2)) { dup = 1; break; } } } /* add an individual alias for every device entry */ if (!dup) { char acpi_id[sizeof(card->devs[0].id)]; int k; buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"pnp:d%s*\");\n", id); /* fix broken pnp bus lowercasing */ for (k = 0; k < sizeof(acpi_id); k++) acpi_id[k] = toupper(id[k]); buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"acpi*:%s:*\");\n", acpi_id); } } } } /* Looks like: pcmcia:mNcNfNfnNpfnNvaNvbNvcNvdN. 
*/ static int do_pcmcia_entry(const char *filename, struct pcmcia_device_id *id, char *alias) { unsigned int i; id->match_flags = TO_NATIVE(id->match_flags); id->manf_id = TO_NATIVE(id->manf_id); id->card_id = TO_NATIVE(id->card_id); id->func_id = TO_NATIVE(id->func_id); id->function = TO_NATIVE(id->function); id->device_no = TO_NATIVE(id->device_no); for (i=0; i<4; i++) { id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]); } strcpy(alias, "pcmcia:"); ADD(alias, "m", id->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID, id->manf_id); ADD(alias, "c", id->match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID, id->card_id); ADD(alias, "f", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID, id->func_id); ADD(alias, "fn", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION, id->function); ADD(alias, "pfn", id->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO, id->device_no); ADD(alias, "pa", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, id->prod_id_hash[0]); ADD(alias, "pb", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, id->prod_id_hash[1]); ADD(alias, "pc", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, id->prod_id_hash[2]); ADD(alias, "pd", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, id->prod_id_hash[3]); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("pcmcia", struct pcmcia_device_id, do_pcmcia_entry); static int do_of_entry (const char *filename, struct of_device_id *of, char *alias) { int len; char *tmp; len = sprintf (alias, "of:N%sT%s", of->name[0] ? of->name : "*", of->type[0] ? of->type : "*"); if (of->compatible[0]) sprintf (&alias[len], "%sC%s", of->type[0] ? "*" : "", of->compatible); /* Replace all whitespace with underscores */ for (tmp = alias; tmp && *tmp; tmp++) if (isspace (*tmp)) *tmp = '_'; add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("of", struct of_device_id, do_of_entry); static int do_vio_entry(const char *filename, struct vio_device_id *vio, char *alias) { char *tmp; sprintf(alias, "vio:T%sS%s", vio->type[0] ? vio->type : "*", vio->compat[0] ? 
vio->compat : "*"); /* Replace all whitespace with underscores */ for (tmp = alias; tmp && *tmp; tmp++) if (isspace (*tmp)) *tmp = '_'; add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("vio", struct vio_device_id, do_vio_entry); #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) static void do_input(char *alias, kernel_ulong_t *arr, unsigned int min, unsigned int max) { unsigned int i; for (i = min; i < max; i++) if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG))) sprintf(alias + strlen(alias), "%X,*", i); } /* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */ static int do_input_entry(const char *filename, struct input_device_id *id, char *alias) { sprintf(alias, "input:"); ADD(alias, "b", id->flags & INPUT_DEVICE_ID_MATCH_BUS, id->bustype); ADD(alias, "v", id->flags & INPUT_DEVICE_ID_MATCH_VENDOR, id->vendor); ADD(alias, "p", id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT, id->product); ADD(alias, "e", id->flags & INPUT_DEVICE_ID_MATCH_VERSION, id->version); sprintf(alias + strlen(alias), "-e*"); if (id->flags & INPUT_DEVICE_ID_MATCH_EVBIT) do_input(alias, id->evbit, 0, INPUT_DEVICE_ID_EV_MAX); sprintf(alias + strlen(alias), "k*"); if (id->flags & INPUT_DEVICE_ID_MATCH_KEYBIT) do_input(alias, id->keybit, INPUT_DEVICE_ID_KEY_MIN_INTERESTING, INPUT_DEVICE_ID_KEY_MAX); sprintf(alias + strlen(alias), "r*"); if (id->flags & INPUT_DEVICE_ID_MATCH_RELBIT) do_input(alias, id->relbit, 0, INPUT_DEVICE_ID_REL_MAX); sprintf(alias + strlen(alias), "a*"); if (id->flags & INPUT_DEVICE_ID_MATCH_ABSBIT) do_input(alias, id->absbit, 0, INPUT_DEVICE_ID_ABS_MAX); sprintf(alias + strlen(alias), "m*"); if (id->flags & INPUT_DEVICE_ID_MATCH_MSCIT) do_input(alias, id->mscbit, 0, INPUT_DEVICE_ID_MSC_MAX); sprintf(alias + strlen(alias), "l*"); if (id->flags & INPUT_DEVICE_ID_MATCH_LEDBIT) do_input(alias, id->ledbit, 0, INPUT_DEVICE_ID_LED_MAX); sprintf(alias + strlen(alias), "s*"); if (id->flags & INPUT_DEVICE_ID_MATCH_SNDBIT) do_input(alias, id->sndbit, 0, 
INPUT_DEVICE_ID_SND_MAX); sprintf(alias + strlen(alias), "f*"); if (id->flags & INPUT_DEVICE_ID_MATCH_FFBIT) do_input(alias, id->ffbit, 0, INPUT_DEVICE_ID_FF_MAX); sprintf(alias + strlen(alias), "w*"); if (id->flags & INPUT_DEVICE_ID_MATCH_SWBIT) do_input(alias, id->swbit, 0, INPUT_DEVICE_ID_SW_MAX); return 1; } ADD_TO_DEVTABLE("input", struct input_device_id, do_input_entry); static int do_eisa_entry(const char *filename, struct eisa_device_id *eisa, char *alias) { if (eisa->sig[0]) sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", eisa->sig); else strcat(alias, "*"); return 1; } ADD_TO_DEVTABLE("eisa", struct eisa_device_id, do_eisa_entry); /* Looks like: parisc:tNhvNrevNsvN */ static int do_parisc_entry(const char *filename, struct parisc_device_id *id, char *alias) { id->hw_type = TO_NATIVE(id->hw_type); id->hversion = TO_NATIVE(id->hversion); id->hversion_rev = TO_NATIVE(id->hversion_rev); id->sversion = TO_NATIVE(id->sversion); strcpy(alias, "parisc:"); ADD(alias, "t", id->hw_type != PA_HWTYPE_ANY_ID, id->hw_type); ADD(alias, "hv", id->hversion != PA_HVERSION_ANY_ID, id->hversion); ADD(alias, "rev", id->hversion_rev != PA_HVERSION_REV_ANY_ID, id->hversion_rev); ADD(alias, "sv", id->sversion != PA_SVERSION_ANY_ID, id->sversion); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("parisc", struct parisc_device_id, do_parisc_entry); /* Looks like: sdio:cNvNdN. */ static int do_sdio_entry(const char *filename, struct sdio_device_id *id, char *alias) { id->class = TO_NATIVE(id->class); id->vendor = TO_NATIVE(id->vendor); id->device = TO_NATIVE(id->device); strcpy(alias, "sdio:"); ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class); ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor); ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("sdio", struct sdio_device_id, do_sdio_entry); /* Looks like: ssb:vNidNrevN. 
*/ static int do_ssb_entry(const char *filename, struct ssb_device_id *id, char *alias) { id->vendor = TO_NATIVE(id->vendor); id->coreid = TO_NATIVE(id->coreid); id->revision = TO_NATIVE(id->revision); strcpy(alias, "ssb:"); ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor); ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid); ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("ssb", struct ssb_device_id, do_ssb_entry); /* Looks like: bcma:mNidNrevNclN. */ static int do_bcma_entry(const char *filename, struct bcma_device_id *id, char *alias) { id->manuf = TO_NATIVE(id->manuf); id->id = TO_NATIVE(id->id); id->rev = TO_NATIVE(id->rev); id->class = TO_NATIVE(id->class); strcpy(alias, "bcma:"); ADD(alias, "m", id->manuf != BCMA_ANY_MANUF, id->manuf); ADD(alias, "id", id->id != BCMA_ANY_ID, id->id); ADD(alias, "rev", id->rev != BCMA_ANY_REV, id->rev); ADD(alias, "cl", id->class != BCMA_ANY_CLASS, id->class); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("bcma", struct bcma_device_id, do_bcma_entry); /* Looks like: virtio:dNvN */ static int do_virtio_entry(const char *filename, struct virtio_device_id *id, char *alias) { id->device = TO_NATIVE(id->device); id->vendor = TO_NATIVE(id->vendor); strcpy(alias, "virtio:"); ADD(alias, "d", id->device != VIRTIO_DEV_ANY_ID, id->device); ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("virtio", struct virtio_device_id, do_virtio_entry); /* * Looks like: vmbus:guid * Each byte of the guid will be represented by two hex characters * in the name. 
*/ static int do_vmbus_entry(const char *filename, struct hv_vmbus_device_id *id, char *alias) { int i; char guid_name[((sizeof(id->guid) + 1)) * 2]; for (i = 0; i < (sizeof(id->guid) * 2); i += 2) sprintf(&guid_name[i], "%02x", id->guid[i/2]); strcpy(alias, "vmbus:"); strcat(alias, guid_name); return 1; } ADD_TO_DEVTABLE("vmbus", struct hv_vmbus_device_id, do_vmbus_entry); /* Looks like: i2c:S */ static int do_i2c_entry(const char *filename, struct i2c_device_id *id, char *alias) { sprintf(alias, I2C_MODULE_PREFIX "%s", id->name); return 1; } ADD_TO_DEVTABLE("i2c", struct i2c_device_id, do_i2c_entry); /* Looks like: spi:S */ static int do_spi_entry(const char *filename, struct spi_device_id *id, char *alias) { sprintf(alias, SPI_MODULE_PREFIX "%s", id->name); return 1; } ADD_TO_DEVTABLE("spi", struct spi_device_id, do_spi_entry); static const struct dmifield { const char *prefix; int field; } dmi_fields[] = { { "bvn", DMI_BIOS_VENDOR }, { "bvr", DMI_BIOS_VERSION }, { "bd", DMI_BIOS_DATE }, { "svn", DMI_SYS_VENDOR }, { "pn", DMI_PRODUCT_NAME }, { "pvr", DMI_PRODUCT_VERSION }, { "rvn", DMI_BOARD_VENDOR }, { "rn", DMI_BOARD_NAME }, { "rvr", DMI_BOARD_VERSION }, { "cvn", DMI_CHASSIS_VENDOR }, { "ct", DMI_CHASSIS_TYPE }, { "cvr", DMI_CHASSIS_VERSION }, { NULL, DMI_NONE } }; static void dmi_ascii_filter(char *d, const char *s) { /* Filter out characters we don't want to see in the modalias string */ for (; *s; s++) if (*s > ' ' && *s < 127 && *s != ':') *(d++) = *s; *d = 0; } static int do_dmi_entry(const char *filename, struct dmi_system_id *id, char *alias) { int i, j; sprintf(alias, "dmi*"); for (i = 0; i < ARRAY_SIZE(dmi_fields); i++) { for (j = 0; j < 4; j++) { if (id->matches[j].slot && id->matches[j].slot == dmi_fields[i].field) { sprintf(alias + strlen(alias), ":%s*", dmi_fields[i].prefix); dmi_ascii_filter(alias + strlen(alias), id->matches[j].substr); strcat(alias, "*"); } } } strcat(alias, ":"); return 1; } ADD_TO_DEVTABLE("dmi", struct dmi_system_id, 
do_dmi_entry); static int do_platform_entry(const char *filename, struct platform_device_id *id, char *alias) { sprintf(alias, PLATFORM_MODULE_PREFIX "%s", id->name); return 1; } ADD_TO_DEVTABLE("platform", struct platform_device_id, do_platform_entry); static int do_mdio_entry(const char *filename, struct mdio_device_id *id, char *alias) { int i; alias += sprintf(alias, MDIO_MODULE_PREFIX); for (i = 0; i < 32; i++) { if (!((id->phy_id_mask >> (31-i)) & 1)) *(alias++) = '?'; else if ((id->phy_id >> (31-i)) & 1) *(alias++) = '1'; else *(alias++) = '0'; } /* Terminate the string */ *alias = 0; return 1; } ADD_TO_DEVTABLE("mdio", struct mdio_device_id, do_mdio_entry); /* Looks like: zorro:iN. */ static int do_zorro_entry(const char *filename, struct zorro_device_id *id, char *alias) { id->id = TO_NATIVE(id->id); strcpy(alias, "zorro:"); ADD(alias, "i", id->id != ZORRO_WILDCARD, id->id); return 1; } ADD_TO_DEVTABLE("zorro", struct zorro_device_id, do_zorro_entry); /* looks like: "pnp:dD" */ static int do_isapnp_entry(const char *filename, struct isapnp_device_id *id, char *alias) { sprintf(alias, "pnp:d%c%c%c%x%x%x%x*", 'A' + ((id->vendor >> 2) & 0x3f) - 1, 'A' + (((id->vendor & 3) << 3) | ((id->vendor >> 13) & 7)) - 1, 'A' + ((id->vendor >> 8) & 0x1f) - 1, (id->function >> 4) & 0x0f, id->function & 0x0f, (id->function >> 12) & 0x0f, (id->function >> 8) & 0x0f); return 1; } ADD_TO_DEVTABLE("isapnp", struct isapnp_device_id, do_isapnp_entry); /* * Append a match expression for a single masked hex digit. * outp points to a pointer to the character at which to append. * *outp is updated on return to point just after the appended text, * to facilitate further appending. */ static void append_nibble_mask(char **outp, unsigned int nibble, unsigned int mask) { char *p = *outp; unsigned int i; switch (mask) { case 0: *p++ = '?'; break; case 0xf: p += sprintf(p, "%X", nibble); break; default: /* * Dumbly emit a match pattern for all possible matching * digits. 
This could be improved in some cases using ranges, * but it has the advantage of being trivially correct, and is * often optimal. */ *p++ = '['; for (i = 0; i < 0x10; i++) if ((i & mask) == nibble) p += sprintf(p, "%X", i); *p++ = ']'; } /* Ensure that the string remains NUL-terminated: */ *p = '\0'; /* Advance the caller's end-of-string pointer: */ *outp = p; } /* * looks like: "amba:dN" * * N is exactly 8 digits, where each is an upper-case hex digit, or * a ? or [] pattern matching exactly one digit. */ static int do_amba_entry(const char *filename, struct amba_id *id, char *alias) { unsigned int digit; char *p = alias; if ((id->id & id->mask) != id->id) fatal("%s: Masked-off bit(s) of AMBA device ID are non-zero: " "id=0x%08X, mask=0x%08X. Please fix this driver.\n", filename, id->id, id->mask); p += sprintf(alias, "amba:d"); for (digit = 0; digit < 8; digit++) append_nibble_mask(&p, (id->id >> (4 * (7 - digit))) & 0xf, (id->mask >> (4 * (7 - digit))) & 0xf); return 1; } ADD_TO_DEVTABLE("amba", struct amba_id, do_amba_entry); /* LOOKS like x86cpu:vendor:VVVV:family:FFFF:model:MMMM:feature:*,FEAT,* * All fields are numbers. It would be nicer to use strings for vendor * and feature, but getting those out of the build system here is too * complicated. */ static int do_x86cpu_entry(const char *filename, struct x86_cpu_id *id, char *alias) { id->feature = TO_NATIVE(id->feature); id->family = TO_NATIVE(id->family); id->model = TO_NATIVE(id->model); id->vendor = TO_NATIVE(id->vendor); strcpy(alias, "x86cpu:"); ADD(alias, "vendor:", id->vendor != X86_VENDOR_ANY, id->vendor); ADD(alias, ":family:", id->family != X86_FAMILY_ANY, id->family); ADD(alias, ":model:", id->model != X86_MODEL_ANY, id->model); strcat(alias, ":feature:*"); if (id->feature != X86_FEATURE_ANY) sprintf(alias + strlen(alias), "%04X*", id->feature); return 1; } ADD_TO_DEVTABLE("x86cpu", struct x86_cpu_id, do_x86cpu_entry); /* Does namelen bytes of name exactly match the symbol? 
*/ static bool sym_is(const char *name, unsigned namelen, const char *symbol) { if (namelen != strlen(symbol)) return false; return memcmp(name, symbol, namelen) == 0; } static void do_table(void *symval, unsigned long size, unsigned long id_size, const char *device_id, void *function, struct module *mod) { unsigned int i; char alias[500]; int (*do_entry)(const char *, void *entry, char *alias) = function; device_id_check(mod->name, device_id, size, id_size, symval); /* Leave last one: it's the terminator. */ size -= id_size; for (i = 0; i < size; i += id_size) { if (do_entry(mod->name, symval+i, alias)) { buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); } } } /* Create MODULE_ALIAS() statements. * At this time, we cannot write the actual output C source yet, * so we write into the mod->dev_table_buf buffer. */ void handle_moddevtable(struct module *mod, struct elf_info *info, Elf_Sym *sym, const char *symname) { void *symval; char *zeros = NULL; const char *name; unsigned int namelen; /* We're looking for a section relative symbol */ if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections) return; /* We're looking for an object */ if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT) return; /* All our symbols are of form <prefix>__mod_XXX_device_table. 
*/ name = strstr(symname, "__mod_"); if (!name) return; name += strlen("__mod_"); namelen = strlen(name); if (namelen < strlen("_device_table")) return; if (strcmp(name + namelen - strlen("_device_table"), "_device_table")) return; namelen -= strlen("_device_table"); /* Handle all-NULL symbols allocated into .bss */ if (info->sechdrs[get_secindex(info, sym)].sh_type & SHT_NOBITS) { zeros = calloc(1, sym->st_size); symval = zeros; } else { symval = (void *)info->hdr + info->sechdrs[get_secindex(info, sym)].sh_offset + sym->st_value; } /* First handle the "special" cases */ if (sym_is(name, namelen, "usb")) do_usb_table(symval, sym->st_size, mod); else if (sym_is(name, namelen, "pnp")) do_pnp_device_entry(symval, sym->st_size, mod); else if (sym_is(name, namelen, "pnp_card")) do_pnp_card_entries(symval, sym->st_size, mod); else { struct devtable **p; INIT_SECTION(__devtable); for (p = __start___devtable; p < __stop___devtable; p++) { if (sym_is(name, namelen, (*p)->device_id)) { do_table(symval, sym->st_size, (*p)->id_size, (*p)->device_id, (*p)->function, mod); break; } } } free(zeros); } /* Now add out buffered information to the generated C source */ void add_moddevtable(struct buffer *buf, struct module *mod) { buf_printf(buf, "\n"); buf_write(buf, mod->dev_table_buf.p, mod->dev_table_buf.pos); free(mod->dev_table_buf.p); }
gpl-2.0
SlimSaber/android_kernel_oppo_msm8974
drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
3644
17227
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <media/msm/vidc_type.h> #include "vcd_ddl_utils.h" #include "vcd_ddl_metadata.h" #include "vcd_res_tracker_api.h" u32 ddl_device_init(struct ddl_init_config *ddl_init_config, void *client_data) { struct ddl_context *ddl_context; u32 status = VCD_S_SUCCESS; if ((!ddl_init_config) || (!ddl_init_config->ddl_callback) || (!ddl_init_config->core_virtual_base_addr) ) { VIDC_LOGERR_STRING("ddl_dev_init:Bad_argument"); return VCD_ERR_ILLEGAL_PARM; } ddl_context = ddl_get_context(); if (DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_init:Multiple_init"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_init:Ddl_busy"); return VCD_ERR_BUSY; } DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context)); DDL_BUSY(ddl_context); if (res_trk_get_enable_ion()) { VIDC_LOGERR_STRING("ddl_dev_init: ION framework enabled"); ddl_context->video_ion_client = res_trk_get_ion_client(); if (!ddl_context->video_ion_client) { VIDC_LOGERR_STRING("ION client create failed"); return VCD_ERR_ILLEGAL_OP; } } ddl_context->memtype = res_trk_get_mem_type(); if (ddl_context->memtype == -1) { VIDC_LOGERR_STRING("ddl_dev_init:Invalid Memtype"); return VCD_ERR_ILLEGAL_PARM; } ddl_context->ddl_callback = ddl_init_config->ddl_callback; ddl_context->interrupt_clr = ddl_init_config->interrupt_clr; ddl_context->core_virtual_base_addr = ddl_init_config->core_virtual_base_addr; ddl_context->client_data = client_data; 
vidc_720p_set_device_virtual_base(ddl_context-> core_virtual_base_addr); ddl_context->current_ddl = NULL; ddl_move_command_state(ddl_context, DDL_CMD_INVALID); ddl_client_transact(DDL_INIT_CLIENTS, NULL); ddl_pmem_alloc(&ddl_context->context_buf_addr, DDL_CONTEXT_MEMORY, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ddl_context->context_buf_addr.virtual_base_addr) { VIDC_LOGERR_STRING("ddl_dev_init:Context_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } if (!status) { ddl_pmem_alloc(&ddl_context->db_line_buffer, DDL_DB_LINE_BUF_SIZE, DDL_TILE_BUFFER_ALIGN_BYTES); if (!ddl_context->db_line_buffer.virtual_base_addr) { VIDC_LOGERR_STRING("ddl_dev_init:Line_buf_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } } if (!status) { ddl_pmem_alloc(&ddl_context->data_partition_tempbuf, DDL_MPEG4_DATA_PARTITION_BUF_SIZE, DDL_TILE_BUFFER_ALIGN_BYTES); if (ddl_context->data_partition_tempbuf.virtual_base_addr \ == NULL) { VIDC_LOGERR_STRING ("ddl_dev_init:Data_partition_buf_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } } if (!status) { ddl_pmem_alloc(&ddl_context->metadata_shared_input, DDL_METADATA_TOTAL_INPUTBUFSIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ddl_context->metadata_shared_input.virtual_base_addr) { VIDC_LOGERR_STRING ("ddl_dev_init:metadata_shared_input_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } } if (!status) { ddl_pmem_alloc(&ddl_context->dbg_core_dump, \ DDL_DBG_CORE_DUMP_SIZE, \ DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ddl_context->dbg_core_dump.virtual_base_addr) { VIDC_LOGERR_STRING ("ddl_dev_init:dbg_core_dump_alloc_failed"); status = VCD_ERR_ALLOC_FAIL; } ddl_context->enable_dbg_core_dump = 0; } if (!status && !vcd_fw_init()) { VIDC_LOGERR_STRING("ddl_dev_init:fw_init_failed"); status = VCD_ERR_ALLOC_FAIL; } if (status) { ddl_release_context_buffers(ddl_context); DDL_IDLE(ddl_context); return status; } ddl_move_command_state(ddl_context, DDL_CMD_DMA_INIT); ddl_core_init(ddl_context); return status; } u32 ddl_device_release(void *client_data) { struct ddl_context *ddl_context; 
ddl_context = ddl_get_context(); if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_rel:Ddl_busy"); return VCD_ERR_BUSY; } if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_rel:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_client_transact(DDL_ACTIVE_CLIENT, NULL)) { VIDC_LOGERR_STRING("ddl_dev_rel:Client_present_err"); return VCD_ERR_CLIENT_PRESENT; } DDL_BUSY(ddl_context); ddl_context->device_state = DDL_DEVICE_NOTINIT; ddl_context->client_data = client_data; ddl_move_command_state(ddl_context, DDL_CMD_INVALID); vidc_720p_stop_fw(); VIDC_LOG_STRING("FW_ENDDONE"); ddl_release_context_buffers(ddl_context); ddl_context->video_ion_client = NULL; DDL_IDLE(ddl_context); return VCD_S_SUCCESS; } u32 ddl_open(u32 **ddl_handle, u32 decoding) { struct ddl_context *ddl_context; struct ddl_client_context *ddl; u32 status; if (!ddl_handle) { VIDC_LOGERR_STRING("ddl_open:Bad_handle"); return VCD_ERR_BAD_HANDLE; } ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_open:Not_inited"); return VCD_ERR_ILLEGAL_OP; } status = ddl_client_transact(DDL_GET_CLIENT, &ddl); if (status) { VIDC_LOGERR_STRING("ddl_open:Client_trasac_failed"); return status; } ddl_move_client_state(ddl, DDL_CLIENT_OPEN); ddl->codec_data.hdr.decoding = decoding; ddl->decoding = decoding; ddl_set_default_meta_data_hdr(ddl); ddl_set_initial_default_values(ddl); *ddl_handle = (u32 *) ddl; return VCD_S_SUCCESS; } u32 ddl_close(u32 **ddl_handle) { struct ddl_context *ddl_context; struct ddl_client_context **ddl = (struct ddl_client_context **)ddl_handle; if (!ddl || !*ddl) { VIDC_LOGERR_STRING("ddl_close:Bad_handle"); return VCD_ERR_BAD_HANDLE; } ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_close:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (!DDLCLIENT_STATE_IS(*ddl, DDL_CLIENT_OPEN)) { VIDC_LOGERR_STRING("ddl_close:Not_in_open_state"); return VCD_ERR_ILLEGAL_OP; } 
ddl_move_client_state(*ddl, DDL_CLIENT_INVALID); if ((*ddl)->decoding) { vcd_fw_transact(false, true, (*ddl)->codec_data.decoder.codec.codec); } else { vcd_fw_transact(false, false, (*ddl)->codec_data.encoder.codec.codec); } ddl_client_transact(DDL_FREE_CLIENT, ddl); return VCD_S_SUCCESS; } u32 ddl_encode_start(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; struct ddl_encoder_data *encoder; u32 dpb_size; ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_start:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_start:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { VIDC_LOGERR_STRING("ddl_enc_start:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { VIDC_LOGERR_STRING("ddl_enc_start:Not_opened"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_encoder_ready_to_start(ddl)) { VIDC_LOGERR_STRING("ddl_enc_start:Err_param_settings"); return VCD_ERR_ILLEGAL_OP; } encoder = &ddl->codec_data.encoder; dpb_size = ddl_get_yuv_buffer_size(&encoder->frame_size, &encoder->re_con_buf_format, false, encoder->codec.codec); dpb_size *= DDL_ENC_NUM_DPB_BUFFERS; ddl_pmem_alloc(&encoder->enc_dpb_addr, dpb_size, DDL_TILE_BUFFER_ALIGN_BYTES); if (!encoder->enc_dpb_addr.virtual_base_addr) { VIDC_LOGERR_STRING("ddl_enc_start:Dpb_alloc_failed"); return VCD_ERR_ALLOC_FAIL; } if ((encoder->codec.codec == VCD_CODEC_MPEG4 && !encoder->short_header.short_header) || encoder->codec.codec == VCD_CODEC_H264) { ddl_pmem_alloc(&encoder->seq_header, DDL_ENC_SEQHEADER_SIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!encoder->seq_header.virtual_base_addr) { ddl_pmem_free(&encoder->enc_dpb_addr); VIDC_LOGERR_STRING ("ddl_enc_start:Seq_hdr_alloc_failed"); return VCD_ERR_ALLOC_FAIL; } } else { encoder->seq_header.buffer_size = 0; encoder->seq_header.virtual_base_addr = 0; } 
DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl_channel_set(ddl); return VCD_S_SUCCESS; } u32 ddl_decode_start(u32 *ddl_handle, struct vcd_sequence_hdr *header, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; struct ddl_decoder_data *decoder; ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_start:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_start:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { VIDC_LOGERR_STRING("ddl_dec_start:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { VIDC_LOGERR_STRING("ddl_dec_start:Not_in_opened_state"); return VCD_ERR_ILLEGAL_OP; } if ((header) && ((!header->sequence_header_len) || (!header->sequence_header) ) ) { VIDC_LOGERR_STRING("ddl_dec_start:Bad_param_seq_header"); return VCD_ERR_ILLEGAL_PARM; } if (!ddl_decoder_ready_to_start(ddl, header)) { VIDC_LOGERR_STRING("ddl_dec_start:Err_param_settings"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); decoder = &ddl->codec_data.decoder; if (header) { decoder->header_in_start = true; decoder->decode_config = *header; } else { decoder->header_in_start = false; decoder->decode_config.sequence_header_len = 0; } if (decoder->codec.codec == VCD_CODEC_H264) { ddl_pmem_alloc(&decoder->h264Vsp_temp_buffer, DDL_DECODE_H264_VSPTEMP_BUFSIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!decoder->h264Vsp_temp_buffer.virtual_base_addr) { DDL_IDLE(ddl_context); VIDC_LOGERR_STRING ("ddl_dec_start:H264Sps_alloc_failed"); return VCD_ERR_ALLOC_FAIL; } } ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl_channel_set(ddl); return VCD_S_SUCCESS; } u32 ddl_decode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_bits, void *client_data) { u32 vcd_status = VCD_S_SUCCESS; struct 
ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_frame:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_frame:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { VIDC_LOGERR_STRING("ddl_dec_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!input_bits || ((!input_bits->vcd_frm.physical || !input_bits->vcd_frm.data_len) && (!(VCD_FRAME_FLAG_EOS & input_bits->vcd_frm.flags)) ) ) { VIDC_LOGERR_STRING("ddl_dec_frame:Bad_input_param"); return VCD_ERR_ILLEGAL_PARM; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl->input_frame = *input_bits; if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { ddl_decode_frame_run(ddl); } else { if (!ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) { VIDC_LOGERR_STRING("ddl_dec_frame:Dpbs_requied"); vcd_status = VCD_ERR_ILLEGAL_OP; } else if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) { vcd_status = ddl_decode_set_buffers(ddl); } else if (DDLCLIENT_STATE_IS (ddl, DDL_CLIENT_WAIT_FOR_INITCODEC)) { ddl->codec_data.decoder.decode_config. sequence_header = ddl->input_frame.vcd_frm.physical; ddl->codec_data.decoder.decode_config. 
sequence_header_len = ddl->input_frame.vcd_frm.data_len; ddl_decode_init_codec(ddl); } else { VIDC_LOGERR_STRING("Dec_frame:Wrong_state"); vcd_status = VCD_ERR_ILLEGAL_OP; } if (vcd_status) DDL_IDLE(ddl_context); } return vcd_status; } u32 ddl_encode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_frame, struct ddl_frame_data_tag *output_bit, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context = ddl_get_context(); if (vidc_msg_timing) ddl_set_core_start_time(__func__, ENC_OP_TIME); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_frame:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_frame:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { VIDC_LOGERR_STRING("ddl_enc_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!input_frame || !input_frame->vcd_frm.physical || !input_frame->vcd_frm.data_len) { VIDC_LOGERR_STRING("ddl_enc_frame:Bad_input_params"); return VCD_ERR_ILLEGAL_PARM; } if ((((u32) input_frame->vcd_frm.physical + input_frame->vcd_frm.offset) & (DDL_STREAMBUF_ALIGN_GUARD_BYTES) ) ) { VIDC_LOGERR_STRING ("ddl_enc_frame:Un_aligned_yuv_start_address"); return VCD_ERR_ILLEGAL_PARM; } if (!output_bit || !output_bit->vcd_frm.physical || !output_bit->vcd_frm.alloc_len) { VIDC_LOGERR_STRING("ddl_enc_frame:Bad_output_params"); return VCD_ERR_ILLEGAL_PARM; } if ((ddl->codec_data.encoder.output_buf_req.sz + output_bit->vcd_frm.offset) > output_bit->vcd_frm.alloc_len) { VIDC_LOGERR_STRING ("ddl_enc_frame:offset_large, Exceeds_min_buf_size"); } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { VIDC_LOGERR_STRING("ddl_enc_frame:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl->input_frame = *input_frame; ddl->output_frame = *output_bit; ddl_encode_frame_run(ddl); return VCD_S_SUCCESS; } u32 
ddl_decode_end(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; ddl_context = ddl_get_context(); if (vidc_msg_timing) { ddl_reset_core_time_variables(DEC_OP_TIME); ddl_reset_core_time_variables(DEC_IP_TIME); } if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_end:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_end:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { VIDC_LOGERR_STRING("ddl_dec_end:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR) ) { VIDC_LOGERR_STRING("ddl_dec_end:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl_channel_end(ddl); return VCD_S_SUCCESS; } u32 ddl_encode_end(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; ddl_context = ddl_get_context(); if (vidc_msg_timing) ddl_reset_core_time_variables(ENC_OP_TIME); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_end:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_end:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { VIDC_LOGERR_STRING("ddl_enc_end:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR)) { VIDC_LOGERR_STRING("ddl_enc_end:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; 
ddl_channel_end(ddl); return VCD_S_SUCCESS; } u32 ddl_reset_hw(u32 mode) { struct ddl_context *ddl_context; struct ddl_client_context *ddl; int i_client_num; VIDC_LOG_STRING("ddl_reset_hw:called"); ddl_context = ddl_get_context(); ddl_move_command_state(ddl_context, DDL_CMD_INVALID); DDL_BUSY(ddl_context); if (ddl_context->core_virtual_base_addr) vidc_720p_do_sw_reset(); ddl_context->device_state = DDL_DEVICE_NOTINIT; for (i_client_num = 0; i_client_num < VCD_MAX_NO_CLIENT; ++i_client_num) { ddl = ddl_context->ddl_clients[i_client_num]; ddl_context->ddl_clients[i_client_num] = NULL; if (ddl) { ddl_release_client_internal_buffers(ddl); ddl_client_transact(DDL_FREE_CLIENT, &ddl); } } ddl_release_context_buffers(ddl_context); DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context)); return true; }
gpl-2.0
Perferom/android_kernel_samsung_msm
drivers/video/msm/vidc/720p/ddl/vcd_ddl.c
3644
17227
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <media/msm/vidc_type.h> #include "vcd_ddl_utils.h" #include "vcd_ddl_metadata.h" #include "vcd_res_tracker_api.h" u32 ddl_device_init(struct ddl_init_config *ddl_init_config, void *client_data) { struct ddl_context *ddl_context; u32 status = VCD_S_SUCCESS; if ((!ddl_init_config) || (!ddl_init_config->ddl_callback) || (!ddl_init_config->core_virtual_base_addr) ) { VIDC_LOGERR_STRING("ddl_dev_init:Bad_argument"); return VCD_ERR_ILLEGAL_PARM; } ddl_context = ddl_get_context(); if (DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_init:Multiple_init"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_init:Ddl_busy"); return VCD_ERR_BUSY; } DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context)); DDL_BUSY(ddl_context); if (res_trk_get_enable_ion()) { VIDC_LOGERR_STRING("ddl_dev_init: ION framework enabled"); ddl_context->video_ion_client = res_trk_get_ion_client(); if (!ddl_context->video_ion_client) { VIDC_LOGERR_STRING("ION client create failed"); return VCD_ERR_ILLEGAL_OP; } } ddl_context->memtype = res_trk_get_mem_type(); if (ddl_context->memtype == -1) { VIDC_LOGERR_STRING("ddl_dev_init:Invalid Memtype"); return VCD_ERR_ILLEGAL_PARM; } ddl_context->ddl_callback = ddl_init_config->ddl_callback; ddl_context->interrupt_clr = ddl_init_config->interrupt_clr; ddl_context->core_virtual_base_addr = ddl_init_config->core_virtual_base_addr; ddl_context->client_data = client_data; 
vidc_720p_set_device_virtual_base(ddl_context-> core_virtual_base_addr); ddl_context->current_ddl = NULL; ddl_move_command_state(ddl_context, DDL_CMD_INVALID); ddl_client_transact(DDL_INIT_CLIENTS, NULL); ddl_pmem_alloc(&ddl_context->context_buf_addr, DDL_CONTEXT_MEMORY, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ddl_context->context_buf_addr.virtual_base_addr) { VIDC_LOGERR_STRING("ddl_dev_init:Context_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } if (!status) { ddl_pmem_alloc(&ddl_context->db_line_buffer, DDL_DB_LINE_BUF_SIZE, DDL_TILE_BUFFER_ALIGN_BYTES); if (!ddl_context->db_line_buffer.virtual_base_addr) { VIDC_LOGERR_STRING("ddl_dev_init:Line_buf_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } } if (!status) { ddl_pmem_alloc(&ddl_context->data_partition_tempbuf, DDL_MPEG4_DATA_PARTITION_BUF_SIZE, DDL_TILE_BUFFER_ALIGN_BYTES); if (ddl_context->data_partition_tempbuf.virtual_base_addr \ == NULL) { VIDC_LOGERR_STRING ("ddl_dev_init:Data_partition_buf_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } } if (!status) { ddl_pmem_alloc(&ddl_context->metadata_shared_input, DDL_METADATA_TOTAL_INPUTBUFSIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ddl_context->metadata_shared_input.virtual_base_addr) { VIDC_LOGERR_STRING ("ddl_dev_init:metadata_shared_input_alloc_fail"); status = VCD_ERR_ALLOC_FAIL; } } if (!status) { ddl_pmem_alloc(&ddl_context->dbg_core_dump, \ DDL_DBG_CORE_DUMP_SIZE, \ DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!ddl_context->dbg_core_dump.virtual_base_addr) { VIDC_LOGERR_STRING ("ddl_dev_init:dbg_core_dump_alloc_failed"); status = VCD_ERR_ALLOC_FAIL; } ddl_context->enable_dbg_core_dump = 0; } if (!status && !vcd_fw_init()) { VIDC_LOGERR_STRING("ddl_dev_init:fw_init_failed"); status = VCD_ERR_ALLOC_FAIL; } if (status) { ddl_release_context_buffers(ddl_context); DDL_IDLE(ddl_context); return status; } ddl_move_command_state(ddl_context, DDL_CMD_DMA_INIT); ddl_core_init(ddl_context); return status; } u32 ddl_device_release(void *client_data) { struct ddl_context *ddl_context; 
ddl_context = ddl_get_context(); if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_rel:Ddl_busy"); return VCD_ERR_BUSY; } if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dev_rel:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_client_transact(DDL_ACTIVE_CLIENT, NULL)) { VIDC_LOGERR_STRING("ddl_dev_rel:Client_present_err"); return VCD_ERR_CLIENT_PRESENT; } DDL_BUSY(ddl_context); ddl_context->device_state = DDL_DEVICE_NOTINIT; ddl_context->client_data = client_data; ddl_move_command_state(ddl_context, DDL_CMD_INVALID); vidc_720p_stop_fw(); VIDC_LOG_STRING("FW_ENDDONE"); ddl_release_context_buffers(ddl_context); ddl_context->video_ion_client = NULL; DDL_IDLE(ddl_context); return VCD_S_SUCCESS; } u32 ddl_open(u32 **ddl_handle, u32 decoding) { struct ddl_context *ddl_context; struct ddl_client_context *ddl; u32 status; if (!ddl_handle) { VIDC_LOGERR_STRING("ddl_open:Bad_handle"); return VCD_ERR_BAD_HANDLE; } ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_open:Not_inited"); return VCD_ERR_ILLEGAL_OP; } status = ddl_client_transact(DDL_GET_CLIENT, &ddl); if (status) { VIDC_LOGERR_STRING("ddl_open:Client_trasac_failed"); return status; } ddl_move_client_state(ddl, DDL_CLIENT_OPEN); ddl->codec_data.hdr.decoding = decoding; ddl->decoding = decoding; ddl_set_default_meta_data_hdr(ddl); ddl_set_initial_default_values(ddl); *ddl_handle = (u32 *) ddl; return VCD_S_SUCCESS; } u32 ddl_close(u32 **ddl_handle) { struct ddl_context *ddl_context; struct ddl_client_context **ddl = (struct ddl_client_context **)ddl_handle; if (!ddl || !*ddl) { VIDC_LOGERR_STRING("ddl_close:Bad_handle"); return VCD_ERR_BAD_HANDLE; } ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_close:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (!DDLCLIENT_STATE_IS(*ddl, DDL_CLIENT_OPEN)) { VIDC_LOGERR_STRING("ddl_close:Not_in_open_state"); return VCD_ERR_ILLEGAL_OP; } 
ddl_move_client_state(*ddl, DDL_CLIENT_INVALID); if ((*ddl)->decoding) { vcd_fw_transact(false, true, (*ddl)->codec_data.decoder.codec.codec); } else { vcd_fw_transact(false, false, (*ddl)->codec_data.encoder.codec.codec); } ddl_client_transact(DDL_FREE_CLIENT, ddl); return VCD_S_SUCCESS; } u32 ddl_encode_start(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; struct ddl_encoder_data *encoder; u32 dpb_size; ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_start:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_start:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { VIDC_LOGERR_STRING("ddl_enc_start:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { VIDC_LOGERR_STRING("ddl_enc_start:Not_opened"); return VCD_ERR_ILLEGAL_OP; } if (!ddl_encoder_ready_to_start(ddl)) { VIDC_LOGERR_STRING("ddl_enc_start:Err_param_settings"); return VCD_ERR_ILLEGAL_OP; } encoder = &ddl->codec_data.encoder; dpb_size = ddl_get_yuv_buffer_size(&encoder->frame_size, &encoder->re_con_buf_format, false, encoder->codec.codec); dpb_size *= DDL_ENC_NUM_DPB_BUFFERS; ddl_pmem_alloc(&encoder->enc_dpb_addr, dpb_size, DDL_TILE_BUFFER_ALIGN_BYTES); if (!encoder->enc_dpb_addr.virtual_base_addr) { VIDC_LOGERR_STRING("ddl_enc_start:Dpb_alloc_failed"); return VCD_ERR_ALLOC_FAIL; } if ((encoder->codec.codec == VCD_CODEC_MPEG4 && !encoder->short_header.short_header) || encoder->codec.codec == VCD_CODEC_H264) { ddl_pmem_alloc(&encoder->seq_header, DDL_ENC_SEQHEADER_SIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!encoder->seq_header.virtual_base_addr) { ddl_pmem_free(&encoder->enc_dpb_addr); VIDC_LOGERR_STRING ("ddl_enc_start:Seq_hdr_alloc_failed"); return VCD_ERR_ALLOC_FAIL; } } else { encoder->seq_header.buffer_size = 0; encoder->seq_header.virtual_base_addr = 0; } 
DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl_channel_set(ddl); return VCD_S_SUCCESS; } u32 ddl_decode_start(u32 *ddl_handle, struct vcd_sequence_hdr *header, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; struct ddl_decoder_data *decoder; ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_start:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_start:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { VIDC_LOGERR_STRING("ddl_dec_start:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { VIDC_LOGERR_STRING("ddl_dec_start:Not_in_opened_state"); return VCD_ERR_ILLEGAL_OP; } if ((header) && ((!header->sequence_header_len) || (!header->sequence_header) ) ) { VIDC_LOGERR_STRING("ddl_dec_start:Bad_param_seq_header"); return VCD_ERR_ILLEGAL_PARM; } if (!ddl_decoder_ready_to_start(ddl, header)) { VIDC_LOGERR_STRING("ddl_dec_start:Err_param_settings"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); decoder = &ddl->codec_data.decoder; if (header) { decoder->header_in_start = true; decoder->decode_config = *header; } else { decoder->header_in_start = false; decoder->decode_config.sequence_header_len = 0; } if (decoder->codec.codec == VCD_CODEC_H264) { ddl_pmem_alloc(&decoder->h264Vsp_temp_buffer, DDL_DECODE_H264_VSPTEMP_BUFSIZE, DDL_LINEAR_BUFFER_ALIGN_BYTES); if (!decoder->h264Vsp_temp_buffer.virtual_base_addr) { DDL_IDLE(ddl_context); VIDC_LOGERR_STRING ("ddl_dec_start:H264Sps_alloc_failed"); return VCD_ERR_ALLOC_FAIL; } } ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl_channel_set(ddl); return VCD_S_SUCCESS; } u32 ddl_decode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_bits, void *client_data) { u32 vcd_status = VCD_S_SUCCESS; struct 
ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context = ddl_get_context(); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_frame:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_frame:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { VIDC_LOGERR_STRING("ddl_dec_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!input_bits || ((!input_bits->vcd_frm.physical || !input_bits->vcd_frm.data_len) && (!(VCD_FRAME_FLAG_EOS & input_bits->vcd_frm.flags)) ) ) { VIDC_LOGERR_STRING("ddl_dec_frame:Bad_input_param"); return VCD_ERR_ILLEGAL_PARM; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl->input_frame = *input_bits; if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { ddl_decode_frame_run(ddl); } else { if (!ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) { VIDC_LOGERR_STRING("ddl_dec_frame:Dpbs_requied"); vcd_status = VCD_ERR_ILLEGAL_OP; } else if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) { vcd_status = ddl_decode_set_buffers(ddl); } else if (DDLCLIENT_STATE_IS (ddl, DDL_CLIENT_WAIT_FOR_INITCODEC)) { ddl->codec_data.decoder.decode_config. sequence_header = ddl->input_frame.vcd_frm.physical; ddl->codec_data.decoder.decode_config. 
sequence_header_len = ddl->input_frame.vcd_frm.data_len; ddl_decode_init_codec(ddl); } else { VIDC_LOGERR_STRING("Dec_frame:Wrong_state"); vcd_status = VCD_ERR_ILLEGAL_OP; } if (vcd_status) DDL_IDLE(ddl_context); } return vcd_status; } u32 ddl_encode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_frame, struct ddl_frame_data_tag *output_bit, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context = ddl_get_context(); if (vidc_msg_timing) ddl_set_core_start_time(__func__, ENC_OP_TIME); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_frame:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_frame:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { VIDC_LOGERR_STRING("ddl_enc_frame:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!input_frame || !input_frame->vcd_frm.physical || !input_frame->vcd_frm.data_len) { VIDC_LOGERR_STRING("ddl_enc_frame:Bad_input_params"); return VCD_ERR_ILLEGAL_PARM; } if ((((u32) input_frame->vcd_frm.physical + input_frame->vcd_frm.offset) & (DDL_STREAMBUF_ALIGN_GUARD_BYTES) ) ) { VIDC_LOGERR_STRING ("ddl_enc_frame:Un_aligned_yuv_start_address"); return VCD_ERR_ILLEGAL_PARM; } if (!output_bit || !output_bit->vcd_frm.physical || !output_bit->vcd_frm.alloc_len) { VIDC_LOGERR_STRING("ddl_enc_frame:Bad_output_params"); return VCD_ERR_ILLEGAL_PARM; } if ((ddl->codec_data.encoder.output_buf_req.sz + output_bit->vcd_frm.offset) > output_bit->vcd_frm.alloc_len) { VIDC_LOGERR_STRING ("ddl_enc_frame:offset_large, Exceeds_min_buf_size"); } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { VIDC_LOGERR_STRING("ddl_enc_frame:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl->input_frame = *input_frame; ddl->output_frame = *output_bit; ddl_encode_frame_run(ddl); return VCD_S_SUCCESS; } u32 
ddl_decode_end(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; ddl_context = ddl_get_context(); if (vidc_msg_timing) { ddl_reset_core_time_variables(DEC_OP_TIME); ddl_reset_core_time_variables(DEC_IP_TIME); } if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_end:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_dec_end:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || !ddl->decoding) { VIDC_LOGERR_STRING("ddl_dec_end:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR) ) { VIDC_LOGERR_STRING("ddl_dec_end:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; ddl_channel_end(ddl); return VCD_S_SUCCESS; } u32 ddl_encode_end(u32 *ddl_handle, void *client_data) { struct ddl_client_context *ddl = (struct ddl_client_context *)ddl_handle; struct ddl_context *ddl_context; ddl_context = ddl_get_context(); if (vidc_msg_timing) ddl_reset_core_time_variables(ENC_OP_TIME); if (!DDL_IS_INITIALIZED(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_end:Not_inited"); return VCD_ERR_ILLEGAL_OP; } if (DDL_IS_BUSY(ddl_context)) { VIDC_LOGERR_STRING("ddl_enc_end:Ddl_busy"); return VCD_ERR_BUSY; } if (!ddl || ddl->decoding) { VIDC_LOGERR_STRING("ddl_enc_end:Bad_handle"); return VCD_ERR_BAD_HANDLE; } if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR)) { VIDC_LOGERR_STRING("ddl_enc_end:Wrong_state"); return VCD_ERR_ILLEGAL_OP; } DDL_BUSY(ddl_context); ddl_context->current_ddl = ddl; ddl_context->client_data = client_data; 
ddl_channel_end(ddl); return VCD_S_SUCCESS; } u32 ddl_reset_hw(u32 mode) { struct ddl_context *ddl_context; struct ddl_client_context *ddl; int i_client_num; VIDC_LOG_STRING("ddl_reset_hw:called"); ddl_context = ddl_get_context(); ddl_move_command_state(ddl_context, DDL_CMD_INVALID); DDL_BUSY(ddl_context); if (ddl_context->core_virtual_base_addr) vidc_720p_do_sw_reset(); ddl_context->device_state = DDL_DEVICE_NOTINIT; for (i_client_num = 0; i_client_num < VCD_MAX_NO_CLIENT; ++i_client_num) { ddl = ddl_context->ddl_clients[i_client_num]; ddl_context->ddl_clients[i_client_num] = NULL; if (ddl) { ddl_release_client_internal_buffers(ddl); ddl_client_transact(DDL_FREE_CLIENT, &ddl); } } ddl_release_context_buffers(ddl_context); DDL_MEMSET(ddl_context, 0, sizeof(struct ddl_context)); return true; }
gpl-2.0
roguesyko/reaper_kernel_lge_g3
arch/powerpc/platforms/ps3/mm.c
4412
32572
/* * PS3 address space management. * * Copyright (C) 2006 Sony Computer Entertainment Inc. * Copyright 2006 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/memory_hotplug.h> #include <linux/memblock.h> #include <linux/slab.h> #include <asm/cell-regs.h> #include <asm/firmware.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/lv1call.h> #include <asm/setup.h> #include "platform.h" #if defined(DEBUG) #define DBG udbg_printf #else #define DBG pr_devel #endif enum { #if defined(CONFIG_PS3_DYNAMIC_DMA) USE_DYNAMIC_DMA = 1, #else USE_DYNAMIC_DMA = 0, #endif }; enum { PAGE_SHIFT_4K = 12U, PAGE_SHIFT_64K = 16U, PAGE_SHIFT_16M = 24U, }; static unsigned long make_page_sizes(unsigned long a, unsigned long b) { return (a << 56) | (b << 48); } enum { ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04, ALLOCATE_MEMORY_ADDR_ZERO = 0X08, }; /* valid htab sizes are {18,19,20} = 256K, 512K, 1M */ enum { HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */ HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */ }; /*============================================================================*/ /* virtual address space routines */ /*============================================================================*/ /** * struct mem_region - memory region structure * @base: base address * @size: size in bytes * @offset: difference between base 
and rm.size */ struct mem_region { u64 base; u64 size; unsigned long offset; }; /** * struct map - address space state variables holder * @total: total memory available as reported by HV * @vas_id - HV virtual address space id * @htab_size: htab size in bytes * * The HV virtual address space (vas) allows for hotplug memory regions. * Memory regions can be created and destroyed in the vas at runtime. * @rm: real mode (bootmem) region * @r1: hotplug memory region(s) * * ps3 addresses * virt_addr: a cpu 'translated' effective address * phys_addr: an address in what Linux thinks is the physical address space * lpar_addr: an address in the HV virtual address space * bus_addr: an io controller 'translated' address on a device bus */ struct map { u64 total; u64 vas_id; u64 htab_size; struct mem_region rm; struct mem_region r1; }; #define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__) static void __maybe_unused _debug_dump_map(const struct map *m, const char *func, int line) { DBG("%s:%d: map.total = %llxh\n", func, line, m->total); DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size); DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id); DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size); DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base); DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset); DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size); } static struct map map; /** * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address * @phys_addr: linux physical address */ unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr) { BUG_ON(is_kernel_addr(phys_addr)); return (phys_addr < map.rm.size || phys_addr >= map.total) ? 
phys_addr : phys_addr + map.r1.offset; } EXPORT_SYMBOL(ps3_mm_phys_to_lpar); /** * ps3_mm_vas_create - create the virtual address space */ void __init ps3_mm_vas_create(unsigned long* htab_size) { int result; u64 start_address; u64 size; u64 access_right; u64 max_page_size; u64 flags; result = lv1_query_logical_partition_address_region_info(0, &start_address, &size, &access_right, &max_page_size, &flags); if (result) { DBG("%s:%d: lv1_query_logical_partition_address_region_info " "failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail; } if (max_page_size < PAGE_SHIFT_16M) { DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__, max_page_size); goto fail; } BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX); BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN); result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE, 2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K), &map.vas_id, &map.htab_size); if (result) { DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail; } result = lv1_select_virtual_address_space(map.vas_id); if (result) { DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail; } *htab_size = map.htab_size; debug_dump_map(&map); return; fail: panic("ps3_mm_vas_create failed"); } /** * ps3_mm_vas_destroy - */ void ps3_mm_vas_destroy(void) { int result; DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); if (map.vas_id) { result = lv1_select_virtual_address_space(0); BUG_ON(result); result = lv1_destruct_virtual_address_space(map.vas_id); BUG_ON(result); map.vas_id = 0; } } /*============================================================================*/ /* memory hotplug routines */ /*============================================================================*/ /** * ps3_mm_region_create - create a memory region in the vas * @r: pointer to a struct mem_region to accept initialized values * @size: 
requested region size * * This implementation creates the region with the vas large page size. * @size is rounded down to a multiple of the vas large page size. */ static int ps3_mm_region_create(struct mem_region *r, unsigned long size) { int result; u64 muid; r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M); DBG("%s:%d requested %lxh\n", __func__, __LINE__, size); DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size); DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__, size - r->size, (size - r->size) / 1024 / 1024); if (r->size == 0) { DBG("%s:%d: size == 0\n", __func__, __LINE__); result = -1; goto zero_region; } result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0, ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid); if (result || r->base < map.rm.size) { DBG("%s:%d: lv1_allocate_memory failed: %s\n", __func__, __LINE__, ps3_result(result)); goto zero_region; } r->offset = r->base - map.rm.size; return result; zero_region: r->size = r->base = r->offset = 0; return result; } /** * ps3_mm_region_destroy - destroy a memory region * @r: pointer to struct mem_region */ static void ps3_mm_region_destroy(struct mem_region *r) { int result; DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); if (r->base) { result = lv1_release_memory(r->base); BUG_ON(result); r->size = r->base = r->offset = 0; map.total = map.rm.size; } } /** * ps3_mm_add_memory - hot add memory */ static int __init ps3_mm_add_memory(void) { int result; unsigned long start_addr; unsigned long start_pfn; unsigned long nr_pages; if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) return -ENODEV; BUG_ON(!mem_init_done); start_addr = map.rm.size; start_pfn = start_addr >> PAGE_SHIFT; nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT; DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n", __func__, __LINE__, start_addr, start_pfn, nr_pages); result = add_memory(0, start_addr, map.r1.size); if (result) { pr_err("%s:%d: add_memory failed: (%d)\n", __func__, __LINE__, result); 
return result; } memblock_add(start_addr, map.r1.size); result = online_pages(start_pfn, nr_pages); if (result) pr_err("%s:%d: online_pages failed: (%d)\n", __func__, __LINE__, result); return result; } device_initcall(ps3_mm_add_memory); /*============================================================================*/ /* dma routines */ /*============================================================================*/ /** * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address. * @r: pointer to dma region structure * @lpar_addr: HV lpar address */ static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r, unsigned long lpar_addr) { if (lpar_addr >= map.rm.size) lpar_addr -= map.r1.offset; BUG_ON(lpar_addr < r->offset); BUG_ON(lpar_addr >= r->offset + r->len); return r->bus_addr + lpar_addr - r->offset; } #define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__) static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r, const char *func, int line) { DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id, r->dev->dev_id); DBG("%s:%d: page_size %u\n", func, line, r->page_size); DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr); DBG("%s:%d: len %lxh\n", func, line, r->len); DBG("%s:%d: offset %lxh\n", func, line, r->offset); } /** * dma_chunk - A chunk of dma pages mapped by the io controller. * @region - The dma region that owns this chunk. * @lpar_addr: Starting lpar address of the area to map. * @bus_addr: Starting ioc bus address of the area to map. * @len: Length in bytes of the area to map. * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the * list of all chuncks owned by the region. * * This implementation uses a very simple dma page manager * based on the dma_chunk structure. This scheme assumes * that all drivers use very well behaved dma ops. 
*/ struct dma_chunk { struct ps3_dma_region *region; unsigned long lpar_addr; unsigned long bus_addr; unsigned long len; struct list_head link; unsigned int usage_count; }; #define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__) static void _dma_dump_chunk (const struct dma_chunk* c, const char* func, int line) { DBG("%s:%d: r.dev %llu:%llu\n", func, line, c->region->dev->bus_id, c->region->dev->dev_id); DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr); DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size); DBG("%s:%d: r.len %lxh\n", func, line, c->region->len); DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset); DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr); DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr); DBG("%s:%d: c.len %lxh\n", func, line, c->len); } static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r, unsigned long bus_addr, unsigned long len) { struct dma_chunk *c; unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus, 1 << r->page_size); list_for_each_entry(c, &r->chunk_list.head, link) { /* intersection */ if (aligned_bus >= c->bus_addr && aligned_bus + aligned_len <= c->bus_addr + c->len) return c; /* below */ if (aligned_bus + aligned_len <= c->bus_addr) continue; /* above */ if (aligned_bus >= c->bus_addr + c->len) continue; /* we don't handle the multi-chunk case for now */ dma_dump_chunk(c); BUG(); } return NULL; } static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r, unsigned long lpar_addr, unsigned long len) { struct dma_chunk *c; unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar, 1 << r->page_size); list_for_each_entry(c, &r->chunk_list.head, link) { /* intersection */ if (c->lpar_addr <= aligned_lpar && aligned_lpar < c->lpar_addr + c->len) { if (aligned_lpar + aligned_len <= 
c->lpar_addr + c->len) return c; else { dma_dump_chunk(c); BUG(); } } /* below */ if (aligned_lpar + aligned_len <= c->lpar_addr) { continue; } /* above */ if (c->lpar_addr + c->len <= aligned_lpar) { continue; } } return NULL; } static int dma_sb_free_chunk(struct dma_chunk *c) { int result = 0; if (c->bus_addr) { result = lv1_unmap_device_dma_region(c->region->dev->bus_id, c->region->dev->dev_id, c->bus_addr, c->len); BUG_ON(result); } kfree(c); return result; } static int dma_ioc0_free_chunk(struct dma_chunk *c) { int result = 0; int iopage; unsigned long offset; struct ps3_dma_region *r = c->region; DBG("%s:start\n", __func__); for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) { offset = (1 << r->page_size) * iopage; /* put INVALID entry */ result = lv1_put_iopte(0, c->bus_addr + offset, c->lpar_addr + offset, r->ioid, 0); DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__, c->bus_addr + offset, c->lpar_addr + offset, r->ioid); if (result) { DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__, __LINE__, ps3_result(result)); } } kfree(c); DBG("%s:end\n", __func__); return result; } /** * dma_sb_map_pages - Maps dma pages into the io controller bus address space. * @r: Pointer to a struct ps3_dma_region. * @phys_addr: Starting physical address of the area to map. * @len: Length in bytes of the area to map. * c_out: A pointer to receive an allocated struct dma_chunk for this area. * * This is the lowest level dma mapping routine, and is the one that will * make the HV call to add the pages into the io controller address space. 
*/ static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, unsigned long len, struct dma_chunk **c_out, u64 iopte_flag) { int result; struct dma_chunk *c; c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); if (!c) { result = -ENOMEM; goto fail_alloc; } c->region = r; c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr); c->len = len; BUG_ON(iopte_flag != 0xf800000000000000UL); result = lv1_map_device_dma_region(c->region->dev->bus_id, c->region->dev->dev_id, c->lpar_addr, c->bus_addr, c->len, iopte_flag); if (result) { DBG("%s:%d: lv1_map_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail_map; } list_add(&c->link, &r->chunk_list.head); *c_out = c; return 0; fail_map: kfree(c); fail_alloc: *c_out = NULL; DBG(" <- %s:%d\n", __func__, __LINE__); return result; } static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr, unsigned long len, struct dma_chunk **c_out, u64 iopte_flag) { int result; struct dma_chunk *c, *last; int iopage, pages; unsigned long offset; DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__, phys_addr, ps3_mm_phys_to_lpar(phys_addr), len); c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC); if (!c) { result = -ENOMEM; goto fail_alloc; } c->region = r; c->len = len; c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); /* allocate IO address */ if (list_empty(&r->chunk_list.head)) { /* first one */ c->bus_addr = r->bus_addr; } else { /* derive from last bus addr*/ last = list_entry(r->chunk_list.head.next, struct dma_chunk, link); c->bus_addr = last->bus_addr + last->len; DBG("%s: last bus=%#lx, len=%#lx\n", __func__, last->bus_addr, last->len); } /* FIXME: check whether length exceeds region size */ /* build ioptes for the area */ pages = len >> r->page_size; DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__, r->page_size, r->len, pages, iopte_flag); for (iopage = 0; iopage < pages; iopage++) { offset = (1 << 
r->page_size) * iopage; result = lv1_put_iopte(0, c->bus_addr + offset, c->lpar_addr + offset, r->ioid, iopte_flag); if (result) { pr_warning("%s:%d: lv1_put_iopte failed: %s\n", __func__, __LINE__, ps3_result(result)); goto fail_map; } DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__, iopage, c->bus_addr + offset, c->lpar_addr + offset, r->ioid); } /* be sure that last allocated one is inserted at head */ list_add(&c->link, &r->chunk_list.head); *c_out = c; DBG("%s: end\n", __func__); return 0; fail_map: for (iopage--; 0 <= iopage; iopage--) { lv1_put_iopte(0, c->bus_addr + offset, c->lpar_addr + offset, r->ioid, 0); } kfree(c); fail_alloc: *c_out = NULL; return result; } /** * dma_sb_region_create - Create a device dma region. * @r: Pointer to a struct ps3_dma_region. * * This is the lowest level dma region create routine, and is the one that * will make the HV call to create the region. */ static int dma_sb_region_create(struct ps3_dma_region *r) { int result; u64 bus_addr; DBG(" -> %s:%d:\n", __func__, __LINE__); BUG_ON(!r); if (!r->dev->bus_id) { pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__, r->dev->bus_id, r->dev->dev_id); return 0; } DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__, __LINE__, r->len, r->page_size, r->offset); BUG_ON(!r->len); BUG_ON(!r->page_size); BUG_ON(!r->region_ops); INIT_LIST_HEAD(&r->chunk_list.head); spin_lock_init(&r->chunk_list.lock); result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id, roundup_pow_of_two(r->len), r->page_size, r->region_type, &bus_addr); r->bus_addr = bus_addr; if (result) { DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); r->len = r->bus_addr = 0; } return result; } static int dma_ioc0_region_create(struct ps3_dma_region *r) { int result; u64 bus_addr; INIT_LIST_HEAD(&r->chunk_list.head); spin_lock_init(&r->chunk_list.lock); result = lv1_allocate_io_segment(0, r->len, r->page_size, &bus_addr); r->bus_addr 
= bus_addr; if (result) { DBG("%s:%d: lv1_allocate_io_segment failed: %s\n", __func__, __LINE__, ps3_result(result)); r->len = r->bus_addr = 0; } DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__, r->len, r->page_size, r->bus_addr); return result; } /** * dma_region_free - Free a device dma region. * @r: Pointer to a struct ps3_dma_region. * * This is the lowest level dma region free routine, and is the one that * will make the HV call to free the region. */ static int dma_sb_region_free(struct ps3_dma_region *r) { int result; struct dma_chunk *c; struct dma_chunk *tmp; BUG_ON(!r); if (!r->dev->bus_id) { pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__, r->dev->bus_id, r->dev->dev_id); return 0; } list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) { list_del(&c->link); dma_sb_free_chunk(c); } result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id, r->bus_addr); if (result) DBG("%s:%d: lv1_free_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); r->bus_addr = 0; return result; } static int dma_ioc0_region_free(struct ps3_dma_region *r) { int result; struct dma_chunk *c, *n; DBG("%s: start\n", __func__); list_for_each_entry_safe(c, n, &r->chunk_list.head, link) { list_del(&c->link); dma_ioc0_free_chunk(c); } result = lv1_release_io_segment(0, r->bus_addr); if (result) DBG("%s:%d: lv1_free_device_dma_region failed: %s\n", __func__, __LINE__, ps3_result(result)); r->bus_addr = 0; DBG("%s: end\n", __func__); return result; } /** * dma_sb_map_area - Map an area of memory into a device dma region. * @r: Pointer to a struct ps3_dma_region. * @virt_addr: Starting virtual address of the area to map. * @len: Length in bytes of the area to map. * @bus_addr: A pointer to return the starting ioc bus address of the area to * map. * * This is the common dma mapping routine. 
*/ static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { int result; unsigned long flags; struct dma_chunk *c; unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) : virt_addr; unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 1 << r->page_size); *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); if (!USE_DYNAMIC_DMA) { unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr); DBG(" -> %s:%d\n", __func__, __LINE__); DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__, virt_addr); DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__, phys_addr); DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__, lpar_addr); DBG("%s:%d len %lxh\n", __func__, __LINE__, len); DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__, *bus_addr, len); } spin_lock_irqsave(&r->chunk_list.lock, flags); c = dma_find_chunk(r, *bus_addr, len); if (c) { DBG("%s:%d: reusing mapped chunk", __func__, __LINE__); dma_dump_chunk(c); c->usage_count++; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return 0; } result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag); if (result) { *bus_addr = 0; DBG("%s:%d: dma_sb_map_pages failed (%d)\n", __func__, __LINE__, result); spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } c->usage_count = 1; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { int result; unsigned long flags; struct dma_chunk *c; unsigned long phys_addr = is_kernel_addr(virt_addr) ? 
__pa(virt_addr) : virt_addr; unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size); unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys, 1 << r->page_size); DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__, virt_addr, len); DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__, phys_addr, aligned_phys, aligned_len); spin_lock_irqsave(&r->chunk_list.lock, flags); c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len); if (c) { /* FIXME */ BUG(); *bus_addr = c->bus_addr + phys_addr - aligned_phys; c->usage_count++; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return 0; } result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag); if (result) { *bus_addr = 0; DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n", __func__, __LINE__, result); spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } *bus_addr = c->bus_addr + phys_addr - aligned_phys; DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__, virt_addr, phys_addr, aligned_phys, *bus_addr); c->usage_count = 1; spin_unlock_irqrestore(&r->chunk_list.lock, flags); return result; } /** * dma_sb_unmap_area - Unmap an area of memory from a device dma region. * @r: Pointer to a struct ps3_dma_region. * @bus_addr: The starting ioc bus address of the area to unmap. * @len: Length in bytes of the area to unmap. * * This is the common dma unmap routine. 
*/
static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long lock_flags;
	struct dma_chunk *chunk;

	spin_lock_irqsave(&r->chunk_list.lock, lock_flags);
	chunk = dma_find_chunk(r, bus_addr, len);

	if (!chunk) {
		/* No chunk covers this range: dump what we looked for. */
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
		    __func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
		    __func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
		    __func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
		    __func__, __LINE__, aligned_len);
		BUG();
	}

	/* Drop one reference; free the chunk when nobody uses it. */
	if (!--chunk->usage_count) {
		list_del(&chunk->link);
		dma_sb_free_chunk(chunk);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, lock_flags);
	return 0;
}

/*
 * dma_ioc0_unmap_area - ioc0 twin of dma_sb_unmap_area().
 * Identical flow, but chunks are released with dma_ioc0_free_chunk()
 * (iopte invalidation) instead of the sb unmap hypercall.
 */
static int dma_ioc0_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long lock_flags;
	struct dma_chunk *chunk;

	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, lock_flags);
	chunk = dma_find_chunk(r, bus_addr, len);

	if (!chunk) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
		    __func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
		    __func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
		    __func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
		    __func__, __LINE__, aligned_len);
		BUG();
	}

	if (!--chunk->usage_count) {
		list_del(&chunk->link);
		dma_ioc0_free_chunk(chunk);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, lock_flags);
	DBG("%s: end\n", __func__);
	return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
* * This routine creates an HV dma region for the device and maps all available * ram into the io controller bus address space. */ static int dma_sb_region_create_linear(struct ps3_dma_region *r) { int result; unsigned long virt_addr, len; dma_addr_t tmp; if (r->len > 16*1024*1024) { /* FIXME: need proper fix */ /* force 16M dma pages for linear mapping */ if (r->page_size != PS3_DMA_16M) { pr_info("%s:%d: forcing 16M pages for linear map\n", __func__, __LINE__); r->page_size = PS3_DMA_16M; r->len = _ALIGN_UP(r->len, 1 << r->page_size); } } result = dma_sb_region_create(r); BUG_ON(result); if (r->offset < map.rm.size) { /* Map (part of) 1st RAM chunk */ virt_addr = map.rm.base + r->offset; len = map.rm.size - r->offset; if (len > r->len) len = r->len; result = dma_sb_map_area(r, virt_addr, len, &tmp, CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | CBE_IOPTE_M); BUG_ON(result); } if (r->offset + r->len > map.rm.size) { /* Map (part of) 2nd RAM chunk */ virt_addr = map.rm.size; len = r->len; if (r->offset >= map.rm.size) virt_addr += r->offset - map.rm.size; else len -= map.rm.size - r->offset; result = dma_sb_map_area(r, virt_addr, len, &tmp, CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | CBE_IOPTE_M); BUG_ON(result); } return result; } /** * dma_sb_region_free_linear - Free a linear dma mapping for a device. * @r: Pointer to a struct ps3_dma_region. * * This routine will unmap all mapped areas and free the HV dma region. 
*/ static int dma_sb_region_free_linear(struct ps3_dma_region *r) { int result; dma_addr_t bus_addr; unsigned long len, lpar_addr; if (r->offset < map.rm.size) { /* Unmap (part of) 1st RAM chunk */ lpar_addr = map.rm.base + r->offset; len = map.rm.size - r->offset; if (len > r->len) len = r->len; bus_addr = dma_sb_lpar_to_bus(r, lpar_addr); result = dma_sb_unmap_area(r, bus_addr, len); BUG_ON(result); } if (r->offset + r->len > map.rm.size) { /* Unmap (part of) 2nd RAM chunk */ lpar_addr = map.r1.base; len = r->len; if (r->offset >= map.rm.size) lpar_addr += r->offset - map.rm.size; else len -= map.rm.size - r->offset; bus_addr = dma_sb_lpar_to_bus(r, lpar_addr); result = dma_sb_unmap_area(r, bus_addr, len); BUG_ON(result); } result = dma_sb_region_free(r); BUG_ON(result); return result; } /** * dma_sb_map_area_linear - Map an area of memory into a device dma region. * @r: Pointer to a struct ps3_dma_region. * @virt_addr: Starting virtual address of the area to map. * @len: Length in bytes of the area to map. * @bus_addr: A pointer to return the starting ioc bus address of the area to * map. * * This routine just returns the corresponding bus address. Actual mapping * occurs in dma_region_create_linear(). */ static int dma_sb_map_area_linear(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr) : virt_addr; *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr)); return 0; } /** * dma_unmap_area_linear - Unmap an area of memory from a device dma region. * @r: Pointer to a struct ps3_dma_region. * @bus_addr: The starting ioc bus address of the area to unmap. * @len: Length in bytes of the area to unmap. * * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear(). 
*/ static int dma_sb_unmap_area_linear(struct ps3_dma_region *r, dma_addr_t bus_addr, unsigned long len) { return 0; }; static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = { .create = dma_sb_region_create, .free = dma_sb_region_free, .map = dma_sb_map_area, .unmap = dma_sb_unmap_area }; static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = { .create = dma_sb_region_create_linear, .free = dma_sb_region_free_linear, .map = dma_sb_map_area_linear, .unmap = dma_sb_unmap_area_linear }; static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = { .create = dma_ioc0_region_create, .free = dma_ioc0_region_free, .map = dma_ioc0_map_area, .unmap = dma_ioc0_unmap_area }; int ps3_dma_region_init(struct ps3_system_bus_device *dev, struct ps3_dma_region *r, enum ps3_dma_page_size page_size, enum ps3_dma_region_type region_type, void *addr, unsigned long len) { unsigned long lpar_addr; lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0; r->dev = dev; r->page_size = page_size; r->region_type = region_type; r->offset = lpar_addr; if (r->offset >= map.rm.size) r->offset -= map.r1.offset; r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size); switch (dev->dev_type) { case PS3_DEVICE_TYPE_SB: r->region_ops = (USE_DYNAMIC_DMA) ? 
&ps3_dma_sb_region_ops : &ps3_dma_sb_region_linear_ops; break; case PS3_DEVICE_TYPE_IOC0: r->region_ops = &ps3_dma_ioc0_region_ops; break; default: BUG(); return -EINVAL; } return 0; } EXPORT_SYMBOL(ps3_dma_region_init); int ps3_dma_region_create(struct ps3_dma_region *r) { BUG_ON(!r); BUG_ON(!r->region_ops); BUG_ON(!r->region_ops->create); return r->region_ops->create(r); } EXPORT_SYMBOL(ps3_dma_region_create); int ps3_dma_region_free(struct ps3_dma_region *r) { BUG_ON(!r); BUG_ON(!r->region_ops); BUG_ON(!r->region_ops->free); return r->region_ops->free(r); } EXPORT_SYMBOL(ps3_dma_region_free); int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr, u64 iopte_flag) { return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag); } int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr, unsigned long len) { return r->region_ops->unmap(r, bus_addr, len); } /*============================================================================*/ /* system startup routines */ /*============================================================================*/ /** * ps3_mm_init - initialize the address space state variables */ void __init ps3_mm_init(void) { int result; DBG(" -> %s:%d\n", __func__, __LINE__); result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size, &map.total); if (result) panic("ps3_repository_read_mm_info() failed"); map.rm.offset = map.rm.base; map.vas_id = map.htab_size = 0; /* this implementation assumes map.rm.base is zero */ BUG_ON(map.rm.base); BUG_ON(!map.rm.size); /* arrange to do this in ps3_mm_add_memory */ ps3_mm_region_create(&map.r1, map.total - map.rm.size); /* correct map.total for the real total amount of memory we use */ map.total = map.rm.size + map.r1.size; DBG(" <- %s:%d\n", __func__, __LINE__); } /** * ps3_mm_shutdown - final cleanup of address space */ void ps3_mm_shutdown(void) { ps3_mm_region_destroy(&map.r1); }
gpl-2.0
TeamRegular/android_kernel_lge_iproj
drivers/net/wireless/ath/ath5k/initvals.c
4924
51537
/*
 * Initial register settings functions
 *
 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include "ath5k.h"
#include "reg.h"
#include "debug.h"

/**
 * struct ath5k_ini - Mode-independent initial register writes
 * @ini_register: Register address
 * @ini_value: Default value
 * @ini_mode: 0 to write 1 to read (and clear)
 */
struct ath5k_ini {
	u16	ini_register;
	u32	ini_value;

	enum {
		AR5K_INI_WRITE = 0,	/* Default */
		AR5K_INI_READ = 1,	/* Read (and clear) instead of write */
	} ini_mode;
};

/**
 * struct ath5k_ini_mode - Mode specific initial register values
 * @mode_register: Register address
 * @mode_value: Set of values for each enum ath5k_driver_mode
 */
struct ath5k_ini_mode {
	u16	mode_register;
	u32	mode_value[3];
};

/* Initial register settings for AR5210.
 * NOTE: entries are applied in table order; AR5K_INI_READ entries are
 * read (to clear the register) instead of written -- see struct ath5k_ini. */
static const struct ath5k_ini ar5210_ini[] = {
	/* PCU and MAC registers */
	{ AR5K_NOQCU_TXDP0, 0 },
	{ AR5K_NOQCU_TXDP1, 0 },
	{ AR5K_RXDP, 0 },
	{ AR5K_CR, 0 },
	{ AR5K_ISR, 0, AR5K_INI_READ },
	{ AR5K_IMR, 0 },
	{ AR5K_IER, AR5K_IER_DISABLE },
	{ AR5K_BSR, 0, AR5K_INI_READ },
	{ AR5K_TXCFG, AR5K_DMASIZE_128B },
	{ AR5K_RXCFG, AR5K_DMASIZE_128B },
	{ AR5K_CFG, AR5K_INIT_CFG },
	{ AR5K_TOPS, 8 },
	{ AR5K_RXNOFRM, 8 },
	{ AR5K_RPGTO, 0 },
	{ AR5K_TXNOFRM, 0 },
	{ AR5K_SFR, 0 },
	{ AR5K_MIBC, 0 },
	{ AR5K_MISC, 0 },
	{ AR5K_RX_FILTER_5210, 0 },
	{ AR5K_MCAST_FILTER0_5210, 0 },
	{ AR5K_MCAST_FILTER1_5210, 0 },
	{ AR5K_TX_MASK0, 0 },
	{ AR5K_TX_MASK1, 0 },
	{ AR5K_CLR_TMASK, 0 },
	{ AR5K_TRIG_LVL, AR5K_TUNE_MIN_TX_FIFO_THRES },
	{ AR5K_DIAG_SW_5210, 0 },
	{ AR5K_RSSI_THR, AR5K_TUNE_RSSI_THRES },
	{ AR5K_TSF_L32_5210, 0 },
	{ AR5K_TIMER0_5210, 0 },
	{ AR5K_TIMER1_5210, 0xffffffff },
	{ AR5K_TIMER2_5210, 0xffffffff },
	{ AR5K_TIMER3_5210, 1 },
	{ AR5K_CFP_DUR_5210, 0 },
	{ AR5K_CFP_PERIOD_5210, 0 },
	/* PHY registers */
	{ AR5K_PHY(0), 0x00000047 },
	{ AR5K_PHY_AGC, 0x00000000 },
	{ AR5K_PHY(3), 0x09848ea6 },
	{ AR5K_PHY(4), 0x3d32e000 },
	{ AR5K_PHY(5), 0x0000076b },
	{ AR5K_PHY_ACT, AR5K_PHY_ACT_DISABLE },
	{ AR5K_PHY(8), 0x02020200 },
	{ AR5K_PHY(9), 0x00000e0e },
	{ AR5K_PHY(10), 0x0a020201 },
	{ AR5K_PHY(11), 0x00036ffc },
	{ AR5K_PHY(12), 0x00000000 },
	{ AR5K_PHY(13), 0x00000e0e },
	{ AR5K_PHY(14), 0x00000007 },
	{ AR5K_PHY(15), 0x00020100 },
	{ AR5K_PHY(16), 0x89630000 },
	{ AR5K_PHY(17), 0x1372169c },
	{ AR5K_PHY(18), 0x0018b633 },
	{ AR5K_PHY(19), 0x1284613c },
	{ AR5K_PHY(20), 0x0de8b8e0 },
	{ AR5K_PHY(21), 0x00074859 },
	{ AR5K_PHY(22), 0x7e80beba },
	{ AR5K_PHY(23), 0x313a665e },
	{ AR5K_PHY_AGCCTL, 0x00001d08 },
	{ AR5K_PHY(25), 0x0001ce00 },
	{ AR5K_PHY(26), 0x409a4190 },
	{ AR5K_PHY(28), 0x0000000f },
	{ AR5K_PHY(29), 0x00000080 },
	{ AR5K_PHY(30), 0x00000004 },
	{ AR5K_PHY(31), 0x00000018 },	/* 0x987c */
	{ AR5K_PHY(64), 0x00000000 },	/* 0x9900 */
	{ AR5K_PHY(65), 0x00000000 },
	{ AR5K_PHY(66), 0x00000000 },
	{ AR5K_PHY(67), 0x00800000 },
	{ AR5K_PHY(68), 0x00000003 },
	/* BB gain table (64 bytes) */
	{ AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000020 },
	{ AR5K_BB_GAIN(2), 0x00000010 }, { AR5K_BB_GAIN(3), 0x00000030 },
	{ AR5K_BB_GAIN(4), 0x00000008 }, { AR5K_BB_GAIN(5), 0x00000028 },
	{ AR5K_BB_GAIN(6), 0x00000028 }, { AR5K_BB_GAIN(7), 0x00000004 },
	{ AR5K_BB_GAIN(8), 0x00000024 }, { AR5K_BB_GAIN(9), 0x00000014 },
	{ AR5K_BB_GAIN(10), 0x00000034 }, { AR5K_BB_GAIN(11), 0x0000000c },
	{ AR5K_BB_GAIN(12), 0x0000002c }, { AR5K_BB_GAIN(13), 0x00000002 },
	{ AR5K_BB_GAIN(14), 0x00000022 }, { AR5K_BB_GAIN(15), 0x00000012 },
	{ AR5K_BB_GAIN(16), 0x00000032 }, { AR5K_BB_GAIN(17), 0x0000000a },
	{ AR5K_BB_GAIN(18), 0x0000002a }, { AR5K_BB_GAIN(19), 0x00000001 },
	{ AR5K_BB_GAIN(20), 0x00000021 }, { AR5K_BB_GAIN(21), 0x00000011 },
	{ AR5K_BB_GAIN(22), 0x00000031 }, { AR5K_BB_GAIN(23), 0x00000009 },
	{ AR5K_BB_GAIN(24), 0x00000029 }, { AR5K_BB_GAIN(25), 0x00000005 },
	{ AR5K_BB_GAIN(26), 0x00000025 }, { AR5K_BB_GAIN(27), 0x00000015 },
	{ AR5K_BB_GAIN(28), 0x00000035 }, { AR5K_BB_GAIN(29), 0x0000000d },
	{ AR5K_BB_GAIN(30), 0x0000002d }, { AR5K_BB_GAIN(31), 0x00000003 },
	{ AR5K_BB_GAIN(32), 0x00000023 }, { AR5K_BB_GAIN(33), 0x00000013 },
	{ AR5K_BB_GAIN(34), 0x00000033 }, { AR5K_BB_GAIN(35), 0x0000000b },
	{ AR5K_BB_GAIN(36), 0x0000002b }, { AR5K_BB_GAIN(37), 0x00000007 },
	{ AR5K_BB_GAIN(38), 0x00000027 }, { AR5K_BB_GAIN(39), 0x00000017 },
	{ AR5K_BB_GAIN(40), 0x00000037 }, { AR5K_BB_GAIN(41), 0x0000000f },
	{ AR5K_BB_GAIN(42), 0x0000002f }, { AR5K_BB_GAIN(43), 0x0000002f },
	{ AR5K_BB_GAIN(44), 0x0000002f }, { AR5K_BB_GAIN(45), 0x0000002f },
	{ AR5K_BB_GAIN(46), 0x0000002f }, { AR5K_BB_GAIN(47), 0x0000002f },
	{ AR5K_BB_GAIN(48), 0x0000002f }, { AR5K_BB_GAIN(49), 0x0000002f },
	{ AR5K_BB_GAIN(50), 0x0000002f }, { AR5K_BB_GAIN(51), 0x0000002f },
	{ AR5K_BB_GAIN(52), 0x0000002f }, { AR5K_BB_GAIN(53), 0x0000002f },
	{ AR5K_BB_GAIN(54), 0x0000002f }, { AR5K_BB_GAIN(55), 0x0000002f },
	{ AR5K_BB_GAIN(56), 0x0000002f }, { AR5K_BB_GAIN(57), 0x0000002f },
	{ AR5K_BB_GAIN(58), 0x0000002f }, { AR5K_BB_GAIN(59), 0x0000002f },
	{ AR5K_BB_GAIN(60), 0x0000002f }, { AR5K_BB_GAIN(61), 0x0000002f },
	{ AR5K_BB_GAIN(62), 0x0000002f }, { AR5K_BB_GAIN(63), 0x0000002f },
	/* 5110 RF gain table (64 bytes) */
	{ AR5K_RF_GAIN(0), 0x0000001d }, { AR5K_RF_GAIN(1), 0x0000005d },
	{ AR5K_RF_GAIN(2), 0x0000009d }, { AR5K_RF_GAIN(3), 0x000000dd },
	{ AR5K_RF_GAIN(4), 0x0000011d }, { AR5K_RF_GAIN(5), 0x00000021 },
	{ AR5K_RF_GAIN(6), 0x00000061 }, { AR5K_RF_GAIN(7), 0x000000a1 },
	{ AR5K_RF_GAIN(8), 0x000000e1 }, { AR5K_RF_GAIN(9), 0x00000031 },
	{ AR5K_RF_GAIN(10), 0x00000071 }, { AR5K_RF_GAIN(11), 0x000000b1 },
	{ AR5K_RF_GAIN(12), 0x0000001c }, { AR5K_RF_GAIN(13), 0x0000005c },
	{ AR5K_RF_GAIN(14), 0x00000029 }, { AR5K_RF_GAIN(15), 0x00000069 },
	{ AR5K_RF_GAIN(16), 0x000000a9 }, { AR5K_RF_GAIN(17), 0x00000020 },
	{ AR5K_RF_GAIN(18), 0x00000019 }, { AR5K_RF_GAIN(19), 0x00000059 },
	{ AR5K_RF_GAIN(20), 0x00000099 }, { AR5K_RF_GAIN(21), 0x00000030 },
	{ AR5K_RF_GAIN(22), 0x00000005 }, { AR5K_RF_GAIN(23), 0x00000025 },
	{ AR5K_RF_GAIN(24), 0x00000065 }, { AR5K_RF_GAIN(25), 0x000000a5 },
	{ AR5K_RF_GAIN(26), 0x00000028 }, { AR5K_RF_GAIN(27), 0x00000068 },
	{ AR5K_RF_GAIN(28), 0x0000001f }, { AR5K_RF_GAIN(29), 0x0000001e },
	{ AR5K_RF_GAIN(30), 0x00000018 }, { AR5K_RF_GAIN(31), 0x00000058 },
	{ AR5K_RF_GAIN(32), 0x00000098 }, { AR5K_RF_GAIN(33), 0x00000003 },
	{ AR5K_RF_GAIN(34), 0x00000004 }, { AR5K_RF_GAIN(35), 0x00000044 },
	{ AR5K_RF_GAIN(36), 0x00000084 }, { AR5K_RF_GAIN(37), 0x00000013 },
	{ AR5K_RF_GAIN(38), 0x00000012 }, { AR5K_RF_GAIN(39), 0x00000052 },
	{ AR5K_RF_GAIN(40), 0x00000092 }, { AR5K_RF_GAIN(41), 0x000000d2 },
	{ AR5K_RF_GAIN(42), 0x0000002b }, { AR5K_RF_GAIN(43), 0x0000002a },
	{ AR5K_RF_GAIN(44), 0x0000006a }, { AR5K_RF_GAIN(45), 0x000000aa },
	{ AR5K_RF_GAIN(46), 0x0000001b }, { AR5K_RF_GAIN(47), 0x0000001a },
	{ AR5K_RF_GAIN(48), 0x0000005a }, { AR5K_RF_GAIN(49), 0x0000009a },
	{ AR5K_RF_GAIN(50), 0x000000da }, { AR5K_RF_GAIN(51), 0x00000006 },
	{ AR5K_RF_GAIN(52), 0x00000006 }, { AR5K_RF_GAIN(53), 0x00000006 },
	{ AR5K_RF_GAIN(54), 0x00000006 }, { AR5K_RF_GAIN(55), 0x00000006 },
	{ AR5K_RF_GAIN(56), 0x00000006 }, { AR5K_RF_GAIN(57), 0x00000006 },
	{ AR5K_RF_GAIN(58), 0x00000006 }, { AR5K_RF_GAIN(59), 0x00000006 },
	{ AR5K_RF_GAIN(60), 0x00000006 }, { AR5K_RF_GAIN(61), 0x00000006 },
	{ AR5K_RF_GAIN(62), 0x00000006 }, { AR5K_RF_GAIN(63), 0x00000006 },
	/* PHY activation */
	{ AR5K_PHY(53), 0x00000020 },
	{ AR5K_PHY(51), 0x00000004 },
	{ AR5K_PHY(50), 0x00060106 },
	{ AR5K_PHY(39), 0x0000006d },
	{ AR5K_PHY(48), 0x00000000 },
	{ AR5K_PHY(52), 0x00000014 },
	{ AR5K_PHY_ACT, AR5K_PHY_ACT_ENABLE },
};

/* Initial register settings for AR5211 */
static const struct ath5k_ini ar5211_ini[] = {
	{ AR5K_RXDP, 0x00000000 },
	{ AR5K_RTSD0, 0x84849c9c },
	{ AR5K_RTSD1, 0x7c7c7c7c },
	{ AR5K_RXCFG, 0x00000005 },
	{ AR5K_MIBC, 0x00000000 },
	{ AR5K_TOPS, 0x00000008 },
	{ AR5K_RXNOFRM, 0x00000008 },
	{ AR5K_TXNOFRM, 0x00000010 },
	{ AR5K_RPGTO, 0x00000000 },
	{ AR5K_RFCNT, 0x0000001f },
	{ AR5K_QUEUE_TXDP(0), 0x00000000 },
	{ AR5K_QUEUE_TXDP(1), 0x00000000 },
	{ AR5K_QUEUE_TXDP(2), 0x00000000 },
	{ AR5K_QUEUE_TXDP(3), 0x00000000 },
	{ AR5K_QUEUE_TXDP(4), 0x00000000 },
	{ AR5K_QUEUE_TXDP(5), 0x00000000 },
	{ AR5K_QUEUE_TXDP(6), 0x00000000 },
	{ AR5K_QUEUE_TXDP(7), 0x00000000 },
	{ AR5K_QUEUE_TXDP(8), 0x00000000 },
	{ AR5K_QUEUE_TXDP(9), 0x00000000 },
	{ AR5K_DCU_FP, 0x00000000 },
	{ AR5K_STA_ID1, 0x00000000 },
	{ AR5K_BSS_ID0, 0x00000000 },
	{ AR5K_BSS_ID1, 0x00000000 },
	{ AR5K_RSSI_THR, 0x00000000 },
	{ AR5K_CFP_PERIOD_5211, 0x00000000 },
	{ AR5K_TIMER0_5211, 0x00000030 },
	{ AR5K_TIMER1_5211, 0x0007ffff },
	{ AR5K_TIMER2_5211, 0x01ffffff },
	{ AR5K_TIMER3_5211, 0x00000031 },
	{ AR5K_CFP_DUR_5211, 0x00000000 },
	{ AR5K_RX_FILTER_5211, 0x00000000 },
	{ AR5K_MCAST_FILTER0_5211, 0x00000000 },
	{ AR5K_MCAST_FILTER1_5211, 0x00000002 },
	{ AR5K_DIAG_SW_5211, 0x00000000 },
	{ AR5K_ADDAC_TEST, 0x00000000 },
	{ AR5K_DEFAULT_ANTENNA, 0x00000000 },
	/* PHY registers */
	{ AR5K_PHY_AGC, 0x00000000 },
	{ AR5K_PHY(3), 0x2d849093 },
	{ AR5K_PHY(4), 0x7d32e000 },
	{ AR5K_PHY(5), 0x00000f6b },
	{ AR5K_PHY_ACT, 0x00000000 },
	{ AR5K_PHY(11), 0x00026ffe },
	{ AR5K_PHY(12), 0x00000000 },
	{ AR5K_PHY(15), 0x00020100 },
	{ AR5K_PHY(16), 0x206a017a },
	{ AR5K_PHY(19), 0x1284613c },
	{ AR5K_PHY(21), 0x00000859 },
	{ AR5K_PHY(26), 0x409a4190 },	/* 0x9868 */
	{ AR5K_PHY(27),
0x050cb081 },
	{ AR5K_PHY(28), 0x0000000f },
	{ AR5K_PHY(29), 0x00000080 },
	{ AR5K_PHY(30), 0x0000000c },
	{ AR5K_PHY(64), 0x00000000 },
	{ AR5K_PHY(65), 0x00000000 },
	{ AR5K_PHY(66), 0x00000000 },
	{ AR5K_PHY(67), 0x00800000 },
	{ AR5K_PHY(68), 0x00000001 },
	{ AR5K_PHY(71), 0x0000092a },
	{ AR5K_PHY_IQ, 0x00000000 },
	{ AR5K_PHY(73), 0x00058a05 },
	{ AR5K_PHY(74), 0x00000001 },
	{ AR5K_PHY(75), 0x00000000 },
	{ AR5K_PHY_PAPD_PROBE, 0x00000000 },
	{ AR5K_PHY(77), 0x00000000 },	/* 0x9934 */
	{ AR5K_PHY(78), 0x00000000 },	/* 0x9938 */
	{ AR5K_PHY(79), 0x0000003f },	/* 0x993c */
	{ AR5K_PHY(80), 0x00000004 },
	{ AR5K_PHY(82), 0x00000000 },
	{ AR5K_PHY(83), 0x00000000 },
	{ AR5K_PHY(84), 0x00000000 },
	{ AR5K_PHY_RADAR, 0x5d50f14c },
	{ AR5K_PHY(86), 0x00000018 },
	{ AR5K_PHY(87), 0x004b6a8e },
	/* Initial Power table (32 bytes)
	 * common on all cards/modes.
	 * Note: Table is rewritten during
	 * txpower setup later using calibration
	 * data etc. so next write is non-common */
	{ AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff },
	{ AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff },
	{ AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff },
	{ AR5K_PHY_PCDAC_TXPOWER(4), 0x09ff09ff },
	{ AR5K_PHY_PCDAC_TXPOWER(5), 0x0aff0aff },
	{ AR5K_PHY_PCDAC_TXPOWER(6), 0x0bff0bff },
	{ AR5K_PHY_PCDAC_TXPOWER(7), 0x0cff0cff },
	{ AR5K_PHY_PCDAC_TXPOWER(8), 0x0dff0dff },
	{ AR5K_PHY_PCDAC_TXPOWER(9), 0x0fff0eff },
	{ AR5K_PHY_PCDAC_TXPOWER(10), 0x12ff12ff },
	{ AR5K_PHY_PCDAC_TXPOWER(11), 0x14ff13ff },
	{ AR5K_PHY_PCDAC_TXPOWER(12), 0x16ff15ff },
	{ AR5K_PHY_PCDAC_TXPOWER(13), 0x19ff17ff },
	{ AR5K_PHY_PCDAC_TXPOWER(14), 0x1bff1aff },
	{ AR5K_PHY_PCDAC_TXPOWER(15), 0x1eff1dff },
	{ AR5K_PHY_PCDAC_TXPOWER(16), 0x23ff20ff },
	{ AR5K_PHY_PCDAC_TXPOWER(17), 0x27ff25ff },
	{ AR5K_PHY_PCDAC_TXPOWER(18), 0x2cff29ff },
	{ AR5K_PHY_PCDAC_TXPOWER(19), 0x31ff2fff },
	{ AR5K_PHY_PCDAC_TXPOWER(20), 0x37ff34ff },
	{ AR5K_PHY_PCDAC_TXPOWER(21), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(22), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(23), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(24), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(25), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(26), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(27), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff },
	{ AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff },
	{ AR5K_PHY_CCKTXCTL, 0x00000000 },
	{ AR5K_PHY(642), 0x503e4646 },
	{ AR5K_PHY_GAIN_2GHZ, 0x6480416c },
	{ AR5K_PHY(644), 0x0199a003 },
	{ AR5K_PHY(645), 0x044cd610 },
	{ AR5K_PHY(646), 0x13800040 },
	{ AR5K_PHY(647), 0x1be00060 },
	{ AR5K_PHY(648), 0x0c53800a },
	{ AR5K_PHY(649), 0x0014df3b },
	{ AR5K_PHY(650), 0x000001b5 },
	{ AR5K_PHY(651), 0x00000020 },
};

/* Initial mode-specific settings for AR5211
 * 5211 supports OFDM-only g (draft g) but we
 * need to test it !
 * (mode_value columns are A, B, G -- see struct ath5k_ini_mode) */
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
	{ AR5K_TXCFG,
	/*	A          B          G       */
	   { 0x00000015, 0x0000001d, 0x00000015 } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(1),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(2),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(3),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(4),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(5),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(6),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(7),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(8),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(9),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_DCU_GBL_IFS_SLOT,
	   { 0x00000168, 0x000001b8, 0x00000168 } },
	{ AR5K_DCU_GBL_IFS_SIFS,
	   { 0x00000230, 0x000000b0, 0x00000230 } },
	{ AR5K_DCU_GBL_IFS_EIFS,
	   { 0x00000d98, 0x00001f48, 0x00000d98 } },
	{ AR5K_DCU_GBL_IFS_MISC,
	   { 0x0000a0e0, 0x00005880, 0x0000a0e0 } },
	{ AR5K_TIME_OUT,
	   { 0x04000400, 0x20003000, 0x04000400 } },
	{ AR5K_USEC_5211,
	   { 0x0e8d8fa7, 0x01608f95, 0x0e8d8fa7 } },
	{ AR5K_PHY(8),
	   { 0x02020200, 0x02010200, 0x02020200 } },
	{ AR5K_PHY_RF_CTL2,
	   { 0x00000e0e, 0x00000707, 0x00000e0e } },
	{ AR5K_PHY_RF_CTL3,
	   { 0x0a020001, 0x05010000, 0x0a020001 } },
	{ AR5K_PHY_RF_CTL4,
	   { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
	{ AR5K_PHY_PA_CTL,
	   { 0x00000007, 0x0000000b, 0x0000000b } },
	{ AR5K_PHY_SETTLING,
	   { 0x1372169c, 0x137216a8, 0x1372169c } },
	{ AR5K_PHY_GAIN,
	   { 0x0018ba67, 0x0018ba69, 0x0018ba69 } },
	{ AR5K_PHY_DESIRED_SIZE,
	   { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } },
	{ AR5K_PHY_SIG,
	   { 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } },
	{ AR5K_PHY_AGCCOARSE,
	   { 0x31375d5e, 0x313a5d5e, 0x31375d5e } },
	{ AR5K_PHY_AGCCTL,
	   { 0x0000bd10, 0x0000bd38, 0x0000bd10 } },
	{ AR5K_PHY_NF,
	   { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
	{ AR5K_PHY_RX_DELAY,
	   { 0x00002710, 0x0000157c, 0x00002710 } },
	{ AR5K_PHY(70),
	   { 0x00000190, 0x00000084, 0x00000190 } },
	{ AR5K_PHY_FRAME_CTL_5211,
	   { 0x6fe01020, 0x6fe00920, 0x6fe01020 } },
	{ AR5K_PHY_PCDAC_TXPOWER_BASE,
	   { 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } },
	{ AR5K_RF_BUFFER_CONTROL_4,
	   { 0x00000010, 0x00000010, 0x00000010 } },
};

/* Initial register settings for AR5212 and newer chips */
static const struct ath5k_ini ar5212_ini_common_start[] = {
	{ AR5K_RXDP, 0x00000000 },
	{ AR5K_RXCFG, 0x00000005 },
	{ AR5K_MIBC, 0x00000000 },
	{ AR5K_TOPS, 0x00000008 },
	{ AR5K_RXNOFRM, 0x00000008 },
	{ AR5K_TXNOFRM, 0x00000010 },
	{ AR5K_RPGTO, 0x00000000 },
	{ AR5K_RFCNT, 0x0000001f },
	{ AR5K_QUEUE_TXDP(0), 0x00000000 },
	{ AR5K_QUEUE_TXDP(1), 0x00000000 },
	{ AR5K_QUEUE_TXDP(2), 0x00000000 },
	{ AR5K_QUEUE_TXDP(3), 0x00000000 },
	{ AR5K_QUEUE_TXDP(4), 0x00000000 },
	{ AR5K_QUEUE_TXDP(5), 0x00000000 },
	{ AR5K_QUEUE_TXDP(6), 0x00000000 },
	{ AR5K_QUEUE_TXDP(7), 0x00000000 },
	{ AR5K_QUEUE_TXDP(8), 0x00000000 },
	{ AR5K_QUEUE_TXDP(9), 0x00000000 },
	{ AR5K_DCU_FP, 0x00000000 },
	{ AR5K_DCU_TXP, 0x00000000 },
	/* Tx filter table 0 (32 entries) */
	{ AR5K_DCU_TX_FILTER_0(0), 0x00000000 },	/* DCU 0 */
	{ AR5K_DCU_TX_FILTER_0(1), 0x00000000
},
	{ AR5K_DCU_TX_FILTER_0(2), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(3), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(4), 0x00000000 },	/* DCU 1 */
	{ AR5K_DCU_TX_FILTER_0(5), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(6), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(7), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(8), 0x00000000 },	/* DCU 2 */
	{ AR5K_DCU_TX_FILTER_0(9), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(10), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(11), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(12), 0x00000000 },	/* DCU 3 */
	{ AR5K_DCU_TX_FILTER_0(13), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(14), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(15), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(16), 0x00000000 },	/* DCU 4 */
	{ AR5K_DCU_TX_FILTER_0(17), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(18), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(19), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(20), 0x00000000 },	/* DCU 5 */
	{ AR5K_DCU_TX_FILTER_0(21), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(22), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(23), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(24), 0x00000000 },	/* DCU 6 */
	{ AR5K_DCU_TX_FILTER_0(25), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(26), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(27), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(28), 0x00000000 },	/* DCU 7 */
	{ AR5K_DCU_TX_FILTER_0(29), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(30), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_0(31), 0x00000000 },
	/* Tx filter table 1 (16 entries) */
	{ AR5K_DCU_TX_FILTER_1(0), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(1), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(2), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(3), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(4), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(5), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(6), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(7), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(8), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(9), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(10), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(11), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(12), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(13), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(14), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_1(15), 0x00000000 },
	{ AR5K_DCU_TX_FILTER_CLR, 0x00000000 },
	{ AR5K_DCU_TX_FILTER_SET, 0x00000000 },
	{ AR5K_STA_ID1, 0x00000000 },
	{ AR5K_BSS_ID0, 0x00000000 },
	{ AR5K_BSS_ID1, 0x00000000 },
	{ AR5K_BEACON_5211, 0x00000000 },
	{ AR5K_CFP_PERIOD_5211, 0x00000000 },
	{ AR5K_TIMER0_5211, 0x00000030 },
	{ AR5K_TIMER1_5211, 0x0007ffff },
	{ AR5K_TIMER2_5211, 0x01ffffff },
	{ AR5K_TIMER3_5211, 0x00000031 },
	{ AR5K_CFP_DUR_5211, 0x00000000 },
	{ AR5K_RX_FILTER_5211, 0x00000000 },
	{ AR5K_DIAG_SW_5211, 0x00000000 },
	{ AR5K_ADDAC_TEST, 0x00000000 },
	{ AR5K_DEFAULT_ANTENNA, 0x00000000 },
	{ AR5K_FRAME_CTL_QOSM, 0x000fc78f },
	{ AR5K_XRMODE, 0x2a82301a },
	{ AR5K_XRDELAY, 0x05dc01e0 },
	{ AR5K_XRTIMEOUT, 0x1f402710 },
	{ AR5K_XRCHIRP, 0x01f40000 },
	{ AR5K_XRSTOMP, 0x00001e1c },
	{ AR5K_SLEEP0, 0x0002aaaa },
	{ AR5K_SLEEP1, 0x02005555 },
	{ AR5K_SLEEP2, 0x00000000 },
	{ AR_BSSMSKL, 0xffffffff },
	{ AR_BSSMSKU, 0x0000ffff },
	{ AR5K_TXPC, 0x00000000 },
	{ AR5K_PROFCNT_TX, 0x00000000 },
	{ AR5K_PROFCNT_RX, 0x00000000 },
	{ AR5K_PROFCNT_RXCLR, 0x00000000 },
	{ AR5K_PROFCNT_CYCLE, 0x00000000 },
	{ AR5K_QUIET_CTL1, 0x00000088 },
	/* Initial rate duration table (32 entries) */
	{ AR5K_RATE_DUR(0), 0x00000000 },
	{ AR5K_RATE_DUR(1), 0x0000008c },
	{ AR5K_RATE_DUR(2), 0x000000e4 },
	{ AR5K_RATE_DUR(3), 0x000002d5 },
	{ AR5K_RATE_DUR(4), 0x00000000 },
	{ AR5K_RATE_DUR(5), 0x00000000 },
	{ AR5K_RATE_DUR(6), 0x000000a0 },
	{ AR5K_RATE_DUR(7), 0x000001c9 },
	{ AR5K_RATE_DUR(8), 0x0000002c },
	{ AR5K_RATE_DUR(9), 0x0000002c },
	{ AR5K_RATE_DUR(10), 0x00000030 },
	{ AR5K_RATE_DUR(11), 0x0000003c },
	{ AR5K_RATE_DUR(12), 0x0000002c },
	{ AR5K_RATE_DUR(13), 0x0000002c },
	{ AR5K_RATE_DUR(14), 0x00000030 },
	{ AR5K_RATE_DUR(15), 0x0000003c },
	{ AR5K_RATE_DUR(16), 0x00000000 },
	{ AR5K_RATE_DUR(17), 0x00000000 },
	{ AR5K_RATE_DUR(18), 0x00000000 },
	{ AR5K_RATE_DUR(19), 0x00000000 },
	{ AR5K_RATE_DUR(20), 0x00000000 },
	{ AR5K_RATE_DUR(21), 0x00000000 },
	{ AR5K_RATE_DUR(22), 0x00000000 },
	{ AR5K_RATE_DUR(23), 0x00000000 },
	{ AR5K_RATE_DUR(24), 0x000000d5 },
	{ AR5K_RATE_DUR(25), 0x000000df },
	{ AR5K_RATE_DUR(26), 0x00000102 },
	{ AR5K_RATE_DUR(27), 0x0000013a },
	{ AR5K_RATE_DUR(28), 0x00000075 },
	{ AR5K_RATE_DUR(29), 0x0000007f },
	{ AR5K_RATE_DUR(30), 0x000000a2 },
	{ AR5K_RATE_DUR(31), 0x00000000 },
	{ AR5K_QUIET_CTL2, 0x00010002 },
	{ AR5K_TSF_PARM, 0x00000001 },
	{ AR5K_QOS_NOACK, 0x000000c0 },
	{ AR5K_PHY_ERR_FIL, 0x00000000 },
	{ AR5K_XRLAT_TX, 0x00000168 },
	{ AR5K_ACKSIFS, 0x00000000 },
	/* Rate -> db table
	 * notice ...03<-02<-01<-00 ! */
	{ AR5K_RATE2DB(0), 0x03020100 },
	{ AR5K_RATE2DB(1), 0x07060504 },
	{ AR5K_RATE2DB(2), 0x0b0a0908 },
	{ AR5K_RATE2DB(3), 0x0f0e0d0c },
	{ AR5K_RATE2DB(4), 0x13121110 },
	{ AR5K_RATE2DB(5), 0x17161514 },
	{ AR5K_RATE2DB(6), 0x1b1a1918 },
	{ AR5K_RATE2DB(7), 0x1f1e1d1c },
	/* Db -> Rate table */
	{ AR5K_DB2RATE(0), 0x03020100 },
	{ AR5K_DB2RATE(1), 0x07060504 },
	{ AR5K_DB2RATE(2), 0x0b0a0908 },
	{ AR5K_DB2RATE(3), 0x0f0e0d0c },
	{ AR5K_DB2RATE(4), 0x13121110 },
	{ AR5K_DB2RATE(5), 0x17161514 },
	{ AR5K_DB2RATE(6), 0x1b1a1918 },
	{ AR5K_DB2RATE(7), 0x1f1e1d1c },
	/* PHY registers (Common settings
	 * for all chips/modes) */
	{ AR5K_PHY(3), 0xad848e19 },
	{ AR5K_PHY(4), 0x7d28e000 },
	{ AR5K_PHY_TIMING_3, 0x9c0a9f6b },
	{ AR5K_PHY_ACT, 0x00000000 },
	{ AR5K_PHY(16), 0x206a017a },
	{ AR5K_PHY(21), 0x00000859 },
	{ AR5K_PHY_BIN_MASK_1, 0x00000000 },
	{ AR5K_PHY_BIN_MASK_2, 0x00000000 },
	{ AR5K_PHY_BIN_MASK_3, 0x00000000 },
	{ AR5K_PHY_BIN_MASK_CTL, 0x00800000 },
	{ AR5K_PHY_ANT_CTL, 0x00000001 },
	/*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */
	{ AR5K_PHY_MAX_RX_LEN, 0x00000c80 },
	{ AR5K_PHY_IQ, 0x05100000 },
	{ AR5K_PHY_WARM_RESET, 0x00000001 },
	{ AR5K_PHY_CTL, 0x00000004 },
	{ AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 },
	{ AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d },
	{ AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f },
	{ AR5K_PHY(82), 0x9280b212 },
	{ AR5K_PHY_RADAR, 0x5d50e188 },
	/*{ AR5K_PHY(86), 0x000000ff },*/
	{ AR5K_PHY(87), 0x004b6a8e },
	{ AR5K_PHY_NFTHRES, 0x000003ce },
	{ AR5K_PHY_RESTART, 0x192fb515 },
	{ AR5K_PHY(94), 0x00000001 },
	{ AR5K_PHY_RFBUS_REQ, 0x00000000 },
	/*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */
	/*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */
	{ AR5K_PHY(644), 0x00806333 },
	{ AR5K_PHY(645), 0x00106c10 },
	{ AR5K_PHY(646), 0x009c4060 },
	/* { AR5K_PHY(647), 0x1483800a }, */
	/* { AR5K_PHY(648), 0x01831061 }, */ /* Old value */
	{ AR5K_PHY(648), 0x018830c6 },
	{ AR5K_PHY(649), 0x00000400 },
	/*{ AR5K_PHY(650), 0x000001b5 },*/
	{ AR5K_PHY(651), 0x00000000 },
	{ AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
	{ AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
	/*{ AR5K_PHY(655), 0x13c889af },*/
	{ AR5K_PHY(656), 0x38490a20 },
	{ AR5K_PHY(657), 0x00007bb6 },
	{ AR5K_PHY(658), 0x0fff3ffc },
};

/* Initial mode-specific settings for AR5212
 * (Written before ar5212_ini) */
static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
	{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
	/*	A/XR       B          G       */
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(1),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(2),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(3),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(4),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(5),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(6),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(7),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(8),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_QUEUE_DFS_LOCAL_IFS(9),
	   { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
	{ AR5K_DCU_GBL_IFS_SIFS,
	   { 0x00000230, 0x000000b0, 0x00000160 } },
	{ AR5K_DCU_GBL_IFS_SLOT,
	   { 0x00000168, 0x000001b8, 0x0000018c } },
	{ AR5K_DCU_GBL_IFS_EIFS,
	   { 0x00000e60, 0x00001f1c, 0x00003e38 } },
	{ AR5K_DCU_GBL_IFS_MISC,
	   { 0x0000a0e0, 0x00005880, 0x0000b0e0 } },
	{ AR5K_TIME_OUT,
	   { 0x03e803e8, 0x04200420, 0x08400840 } },
	{ AR5K_PHY(8),
	   { 0x02020200, 0x02010200, 0x02020200 } },
	{ AR5K_PHY_RF_CTL2, {
0x00000e0e, 0x00000707, 0x00000e0e } },
	{ AR5K_PHY_SETTLING,
	   { 0x1372161c, 0x13721722, 0x137216a2 } },
	{ AR5K_PHY_AGCCTL,
	   { 0x00009d10, 0x00009d18, 0x00009d18 } },
	{ AR5K_PHY_NF,
	   { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
	{ AR5K_PHY_WEAK_OFDM_HIGH_THR,
	   { 0x409a4190, 0x409a4190, 0x409a4190 } },
	{ AR5K_PHY(70),
	   { 0x00000190, 0x00000084, 0x00000190 } },
	{ AR5K_PHY_OFDM_SELFCORR,
	   { 0x10058a05, 0x10058a05, 0x10058a05 } },
	{ 0xa230,
	   { 0x00000000, 0x00000000, 0x00000108 } },
};

/* Initial mode-specific settings for AR5212 + RF5111
 * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
	{ AR5K_TXCFG,
	/*	A/XR       B          G       */
	   { 0x00008015, 0x00008015, 0x00008015 } },
	{ AR5K_USEC_5211,
	   { 0x128d8fa7, 0x04e00f95, 0x12e00fab } },
	{ AR5K_PHY_RF_CTL3,
	   { 0x0a020001, 0x05010100, 0x0a020001 } },
	{ AR5K_PHY_RF_CTL4,
	   { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
	{ AR5K_PHY_PA_CTL,
	   { 0x00000007, 0x0000000b, 0x0000000b } },
	{ AR5K_PHY_GAIN,
	   { 0x0018da5a, 0x0018ca69, 0x0018ca69 } },
	{ AR5K_PHY_DESIRED_SIZE,
	   { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
	{ AR5K_PHY_SIG,
	   { 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e } },
	{ AR5K_PHY_AGCCOARSE,
	   { 0x3137665e, 0x3137665e, 0x3137665e } },
	{ AR5K_PHY_WEAK_OFDM_LOW_THR,
	   { 0x050cb081, 0x050cb081, 0x050cb080 } },
	{ AR5K_PHY_RX_DELAY,
	   { 0x00002710, 0x0000157c, 0x00002af8 } },
	{ AR5K_PHY_FRAME_CTL_5211,
	   { 0xf7b81020, 0xf7b80d20, 0xf7b81020 } },
	{ AR5K_PHY_GAIN_2GHZ,
	   { 0x642c416a, 0x6440416a, 0x6440416a } },
	{ AR5K_PHY_CCK_RX_CTL_4,
	   { 0x1883800a, 0x1873800a, 0x1883800a } },
};

/* Common for all modes */
static const struct ath5k_ini rf5111_ini_common_end[] = {
	{ AR5K_DCU_FP, 0x00000000 },
	{ AR5K_PHY_AGC, 0x00000000 },
	{ AR5K_PHY_ADC_CTL, 0x00022ffe },
	{ 0x983c, 0x00020100 },
	{ AR5K_PHY_GAIN_OFFSET, 0x1284613c },
	{ AR5K_PHY_PAPD_PROBE, 0x00004883 },
	{ 0x9940, 0x00000004 },
	{ 0x9958, 0x000000ff },
	{ 0x9974, 0x00000000 },
	{ AR5K_PHY_SPENDING, 0x00000018 },
	{ AR5K_PHY_CCKTXCTL, 0x00000000 },
	{ AR5K_PHY_CCK_CROSSCORR, 0xd03e6788 },
	{ AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
	{ 0xa23c, 0x13c889af },
};

/* Initial mode-specific settings for AR5212 + RF5112
 * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
	{ AR5K_TXCFG,
	/*	A/XR       B          G       */
	   { 0x00008015, 0x00008015, 0x00008015 } },
	{ AR5K_USEC_5211,
	   { 0x128d93a7, 0x04e01395, 0x12e013ab } },
	{ AR5K_PHY_RF_CTL3,
	   { 0x0a020001, 0x05020100, 0x0a020001 } },
	{ AR5K_PHY_RF_CTL4,
	   { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
	{ AR5K_PHY_PA_CTL,
	   { 0x00000007, 0x0000000b, 0x0000000b } },
	{ AR5K_PHY_GAIN,
	   { 0x0018da6d, 0x0018ca75, 0x0018ca75 } },
	{ AR5K_PHY_DESIRED_SIZE,
	   { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
	{ AR5K_PHY_SIG,
	   { 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e } },
	{ AR5K_PHY_AGCCOARSE,
	   { 0x3137665e, 0x3137665e, 0x3137665e } },
	{ AR5K_PHY_WEAK_OFDM_LOW_THR,
	   { 0x050cb081, 0x050cb081, 0x050cb081 } },
	{ AR5K_PHY_RX_DELAY,
	   { 0x000007d0, 0x0000044c, 0x00000898 } },
	{ AR5K_PHY_FRAME_CTL_5211,
	   { 0xf7b81020, 0xf7b80d10, 0xf7b81010 } },
	{ AR5K_PHY_CCKTXCTL,
	   { 0x00000000, 0x00000008, 0x00000008 } },
	{ AR5K_PHY_CCK_CROSSCORR,
	   { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
	{ AR5K_PHY_GAIN_2GHZ,
	   { 0x642c0140, 0x6442c160, 0x6442c160 } },
	{ AR5K_PHY_CCK_RX_CTL_4,
	   { 0x1883800a, 0x1873800a, 0x1883800a } },
};

static const struct ath5k_ini rf5112_ini_common_end[] = {
	{ AR5K_DCU_FP, 0x00000000 },
	{ AR5K_PHY_AGC, 0x00000000 },
	{ AR5K_PHY_ADC_CTL, 0x00022ffe },
	{ 0x983c, 0x00020100 },
	{ AR5K_PHY_GAIN_OFFSET, 0x1284613c },
	{ AR5K_PHY_PAPD_PROBE, 0x00004882 },
	{ 0x9940, 0x00000004 },
	{ 0x9958, 0x000000ff },
	{ 0x9974, 0x00000000 },
	{ AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
	{ 0xa23c, 0x13c889af },
};

/* Initial mode-specific settings for RF5413/5414
 * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
	{ AR5K_TXCFG,
	/*	A/XR       B          G       */
	   { 0x00000015, 0x00000015, 0x00000015 } },
	{ AR5K_USEC_5211,
	   { 0x128d93a7, 0x04e01395, 0x12e013ab } },
	{ AR5K_PHY_RF_CTL3,
	   { 0x0a020001, 0x05020100, 0x0a020001 } },
	{ AR5K_PHY_RF_CTL4,
	   { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
	{ AR5K_PHY_PA_CTL,
	   { 0x00000007, 0x0000000b, 0x0000000b } },
	{ AR5K_PHY_GAIN,
	   { 0x0018fa61, 0x001a1a63, 0x001a1a63 } },
	{ AR5K_PHY_DESIRED_SIZE,
	   { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
	{ AR5K_PHY_SIG,
	   { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
	{ AR5K_PHY_AGCCOARSE,
	   { 0x3139605e, 0x3139605e, 0x3139605e } },
	{ AR5K_PHY_WEAK_OFDM_LOW_THR,
	   { 0x050cb081, 0x050cb081, 0x050cb081 } },
	{ AR5K_PHY_RX_DELAY,
	   { 0x000007d0, 0x0000044c, 0x00000898 } },
	{ AR5K_PHY_FRAME_CTL_5211,
	   { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
	{ AR5K_PHY_CCKTXCTL,
	   { 0x00000000, 0x00000000, 0x00000000 } },
	{ AR5K_PHY_CCK_CROSSCORR,
	   { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
	{ AR5K_PHY_GAIN_2GHZ,
	   { 0x002ec1e0, 0x002ac120, 0x002ac120 } },
	{ AR5K_PHY_CCK_RX_CTL_4,
	   { 0x1883800a, 0x1863800a, 0x1883800a } },
	{ 0xa300,
	   { 0x18010000, 0x18010000, 0x18010000 } },
	{ 0xa304,
	   { 0x30032602, 0x30032602, 0x30032602 } },
	{ 0xa308,
	   { 0x48073e06, 0x48073e06, 0x48073e06 } },
	{ 0xa30c,
	   { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
	{ 0xa310,
	   { 0x641a600f, 0x641a600f, 0x641a600f } },
	{ 0xa314,
	   { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
	{ 0xa318,
	   { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
	{ 0xa31c,
	   { 0x90cf865b, 0x8ecf865b, 0x8ecf865b } },
	{ 0xa320,
	   { 0x9d4f970f, 0x9b4f970f, 0x9b4f970f } },
	{ 0xa324,
	   { 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f } },
	{ 0xa328,
	   { 0xb55faf1f, 0xb35faf1f, 0xb35faf1f } },
	{ 0xa32c,
	   { 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f } },
	{ 0xa330,
	   { 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f } },
	{ 0xa334,
	   { 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } },
};

static const struct ath5k_ini rf5413_ini_common_end[] = {
	{ AR5K_DCU_FP, 0x000003e0 },
	{ AR5K_5414_CBCFG, 0x00000010 },
	{ AR5K_SEQ_MASK, 0x0000000f },
	{ 0x809c, 0x00000000 },
	{ 0x80a0, 0x00000000 },
	{ AR5K_MIC_QOS_CTL, 0x00000000 },
	{ AR5K_MIC_QOS_SEL, 0x00000000 },
	{ AR5K_MISC_MODE, 0x00000000 },
	{ AR5K_OFDM_FIL_CNT, 0x00000000 },
	{ AR5K_CCK_FIL_CNT, 0x00000000 },
	{ AR5K_PHYERR_CNT1, 0x00000000 },
	{ AR5K_PHYERR_CNT1_MASK, 0x00000000 },
	{ AR5K_PHYERR_CNT2, 0x00000000 },
	{ AR5K_PHYERR_CNT2_MASK, 0x00000000 },
	{ AR5K_TSF_THRES, 0x00000000 },
	{ 0x8140, 0x800003f9 },
	{ 0x8144, 0x00000000 },
	{ AR5K_PHY_AGC, 0x00000000 },
	{ AR5K_PHY_ADC_CTL, 0x0000a000 },
	{ 0x983c, 0x00200400 },
	{ AR5K_PHY_GAIN_OFFSET, 0x1284233c },
	{ AR5K_PHY_SCR, 0x0000001f },
	{ AR5K_PHY_SLMT, 0x00000080 },
	{ AR5K_PHY_SCAL, 0x0000000e },
	{ 0x9958, 0x00081fff },
	{ AR5K_PHY_TIMING_7, 0x00000000 },
	{ AR5K_PHY_TIMING_8, 0x02800000 },
	{ AR5K_PHY_TIMING_11, 0x00000000 },
	{ AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
	{ 0x99e4, 0xaaaaaaaa },
	{ 0x99e8, 0x3c466478 },
	{ 0x99ec, 0x000000aa },
	{ AR5K_PHY_SCLOCK, 0x0000000c },
	{ AR5K_PHY_SDELAY, 0x000000ff },
	{ AR5K_PHY_SPENDING, 0x00000014 },
	{ AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
	{ 0xa23c, 0x93c889af },
	{ AR5K_PHY_FAST_ADC, 0x00000001 },
	{ 0xa250, 0x0000a000 },
	{ AR5K_PHY_BLUETOOTH, 0x00000000 },
	{ AR5K_PHY_TPC_RG1, 0x0cc75380 },
	{ 0xa25c, 0x0f0f0f01 },
	{ 0xa260, 0x5f690f01 },
	{ 0xa264, 0x00418a11 },
	{ 0xa268, 0x00000000 },
	{ AR5K_PHY_TPC_RG5, 0x0c30c16a },
	{ 0xa270, 0x00820820 },
	{ 0xa274, 0x081b7caa },
	{ 0xa278, 0x1ce739ce },
	{ 0xa27c, 0x051701ce },
	{ 0xa338, 0x00000000 },
	{ 0xa33c, 0x00000000 },
	{ 0xa340, 0x00000000 },
	{ 0xa344, 0x00000000 },
	{ 0xa348, 0x3fffffff },
	{ 0xa34c, 0x3fffffff },
	{ 0xa350, 0x3fffffff },
	{ 0xa354, 0x0003ffff },
	{ 0xa358, 0x79a8aa1f },
	{ 0xa35c, 0x066c420f },
	{ 0xa360, 0x0f282207 },
	{ 0xa364, 0x17601685 },
	{ 0xa368, 0x1f801104 },
	{ 0xa36c, 0x37a00c03 },
	{ 0xa370, 0x3fc40883 },
	{ 0xa374, 0x57c00803 },
	{ 0xa378, 0x5fd80682 },
	{ 0xa37c, 0x7fe00482 },
	{ 0xa380, 0x7f3c7bba },
	{ 0xa384, 0xf3307ff0 },
};

/* Initial mode-specific settings for RF2413/2414
 * (Written after ar5212_ini) */
/* XXX: a mode ?
*/
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
	{ AR5K_TXCFG,
	/*	A/XR       B          G       */
	   { 0x00000015, 0x00000015, 0x00000015 } },
	{ AR5K_USEC_5211,
	   { 0x128d93a7, 0x04e01395, 0x12e013ab } },
	{ AR5K_PHY_RF_CTL3,
	   { 0x0a020001, 0x05020000, 0x0a020001 } },
	{ AR5K_PHY_RF_CTL4,
	   { 0x00000e00, 0x00000e00, 0x00000e00 } },
	{ AR5K_PHY_PA_CTL,
	   { 0x00000002, 0x0000000a, 0x0000000a } },
	{ AR5K_PHY_GAIN,
	   { 0x0018da6d, 0x001a6a64, 0x001a6a64 } },
	{ AR5K_PHY_DESIRED_SIZE,
	   { 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da } },
	{ AR5K_PHY_SIG,
	   { 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e } },
	{ AR5K_PHY_AGCCOARSE,
	   { 0x3137665e, 0x3137665e, 0x3139605e } },
	{ AR5K_PHY_WEAK_OFDM_LOW_THR,
	   { 0x050cb081, 0x050cb081, 0x050cb081 } },
	{ AR5K_PHY_RX_DELAY,
	   { 0x000007d0, 0x0000044c, 0x00000898 } },
	{ AR5K_PHY_FRAME_CTL_5211,
	   { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
	{ AR5K_PHY_CCKTXCTL,
	   { 0x00000000, 0x00000000, 0x00000000 } },
	{ AR5K_PHY_CCK_CROSSCORR,
	   { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
	{ AR5K_PHY_GAIN_2GHZ,
	   { 0x002c0140, 0x0042c140, 0x0042c140 } },
	{ AR5K_PHY_CCK_RX_CTL_4,
	   { 0x1883800a, 0x1863800a, 0x1883800a } },
};

static const struct ath5k_ini rf2413_ini_common_end[] = {
	{ AR5K_DCU_FP, 0x000003e0 },
	{ AR5K_SEQ_MASK, 0x0000000f },
	{ AR5K_MIC_QOS_CTL, 0x00000000 },
	{ AR5K_MIC_QOS_SEL, 0x00000000 },
	{ AR5K_MISC_MODE, 0x00000000 },
	{ AR5K_OFDM_FIL_CNT, 0x00000000 },
	{ AR5K_CCK_FIL_CNT, 0x00000000 },
	{ AR5K_PHYERR_CNT1, 0x00000000 },
	{ AR5K_PHYERR_CNT1_MASK, 0x00000000 },
	{ AR5K_PHYERR_CNT2, 0x00000000 },
	{ AR5K_PHYERR_CNT2_MASK, 0x00000000 },
	{ AR5K_TSF_THRES, 0x00000000 },
	{ 0x8140, 0x800000a8 },
	{ 0x8144, 0x00000000 },
	{ AR5K_PHY_AGC, 0x00000000 },
	{ AR5K_PHY_ADC_CTL, 0x0000a000 },
	{ 0x983c, 0x00200400 },
	{ AR5K_PHY_GAIN_OFFSET, 0x1284233c },
	{ AR5K_PHY_SCR, 0x0000001f },
	{ AR5K_PHY_SLMT, 0x00000080 },
	{ AR5K_PHY_SCAL, 0x0000000e },
	{ 0x9958, 0x000000ff },
	{ AR5K_PHY_TIMING_7, 0x00000000 },
	{ AR5K_PHY_TIMING_8, 0x02800000 },
	{ AR5K_PHY_TIMING_11, 0x00000000 },
	{ AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
	{ 0x99e4, 0xaaaaaaaa },
	{ 0x99e8, 0x3c466478 },
	{ 0x99ec, 0x000000aa },
	{ AR5K_PHY_SCLOCK, 0x0000000c },
	{ AR5K_PHY_SDELAY, 0x000000ff },
	{ AR5K_PHY_SPENDING, 0x00000014 },
	{ AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
	{ 0xa23c, 0x93c889af },
	{ AR5K_PHY_FAST_ADC, 0x00000001 },
	{ 0xa250, 0x0000a000 },
	{ AR5K_PHY_BLUETOOTH, 0x00000000 },
	{ AR5K_PHY_TPC_RG1, 0x0cc75380 },
	{ 0xa25c, 0x0f0f0f01 },
	{ 0xa260, 0x5f690f01 },
	{ 0xa264, 0x00418a11 },
	{ 0xa268, 0x00000000 },
	{ AR5K_PHY_TPC_RG5, 0x0c30c16a },
	{ 0xa270, 0x00820820 },
	{ 0xa274, 0x001b7caa },
	{ 0xa278, 0x1ce739ce },
	{ 0xa27c, 0x051701ce },
	{ 0xa300, 0x18010000 },
	{ 0xa304, 0x30032602 },
	{ 0xa308, 0x48073e06 },
	{ 0xa30c, 0x560b4c0a },
	{ 0xa310, 0x641a600f },
	{ 0xa314, 0x784f6e1b },
	{ 0xa318, 0x868f7c5a },
	{ 0xa31c, 0x8ecf865b },
	{ 0xa320, 0x9d4f970f },
	{ 0xa324, 0xa5cfa18f },
	{ 0xa328, 0xb55faf1f },
	{ 0xa32c, 0xbddfb99f },
	{ 0xa330, 0xcd7fc73f },
	{ 0xa334, 0xd5ffd1bf },
	{ 0xa338, 0x00000000 },
	{ 0xa33c, 0x00000000 },
	{ 0xa340, 0x00000000 },
	{ 0xa344, 0x00000000 },
	{ 0xa348, 0x3fffffff },
	{ 0xa34c, 0x3fffffff },
	{ 0xa350, 0x3fffffff },
	{ 0xa354, 0x0003ffff },
	{ 0xa358, 0x79a8aa1f },
	{ 0xa35c, 0x066c420f },
	{ 0xa360, 0x0f282207 },
	{ 0xa364, 0x17601685 },
	{ 0xa368, 0x1f801104 },
	{ 0xa36c, 0x37a00c03 },
	{ 0xa370, 0x3fc40883 },
	{ 0xa374, 0x57c00803 },
	{ 0xa378, 0x5fd80682 },
	{ 0xa37c, 0x7fe00482 },
	{ 0xa380, 0x7f3c7bba },
	{ 0xa384, 0xf3307ff0 },
};

/* Initial mode-specific settings for RF2425
 * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
	{ AR5K_TXCFG,
	/*	A/XR       B          G       */
	   { 0x00000015, 0x00000015, 0x00000015 } },
	{ AR5K_USEC_5211,
	   { 0x128d93a7, 0x04e01395, 0x12e013ab } },
	{ AR5K_PHY_RF_CTL3,
	   { 0x0a020001, 0x05020100, 0x0a020001 } },
	{ AR5K_PHY_RF_CTL4,
	   { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
	{ AR5K_PHY_PA_CTL,
	   { 0x00000003, 0x0000000b, 0x0000000b } },
	{ AR5K_PHY_SETTLING,
	   { 0x1372161c, 0x13721722, 0x13721422 } },
	{ AR5K_PHY_GAIN,
	   { 0x0018fa61, 0x00199a65, 0x00199a65 } },
	{ AR5K_PHY_DESIRED_SIZE,
	   { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
	{ AR5K_PHY_SIG,
	   { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
	{ AR5K_PHY_AGCCOARSE,
	   { 0x3139605e, 0x3139605e, 0x3139605e } },
	{ AR5K_PHY_WEAK_OFDM_LOW_THR,
	   { 0x050cb081, 0x050cb081, 0x050cb081 } },
	{ AR5K_PHY_RX_DELAY,
	   { 0x000007d0, 0x0000044c, 0x00000898 } },
	{ AR5K_PHY_FRAME_CTL_5211,
	   { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
	{ AR5K_PHY_CCKTXCTL,
	   { 0x00000000, 0x00000000, 0x00000000 } },
	{ AR5K_PHY_CCK_CROSSCORR,
	   { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
	{ AR5K_PHY_GAIN_2GHZ,
	   { 0x00000140, 0x0052c140, 0x0052c140 } },
	{ AR5K_PHY_CCK_RX_CTL_4,
	   { 0x1883800a, 0x1863800a, 0x1883800a } },
	{ 0xa324,
	   { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
	{ 0xa328,
	   { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
	{ 0xa32c,
	   { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
	{ 0xa330,
	   { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
	{ 0xa334,
	   { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
};

static const struct ath5k_ini rf2425_ini_common_end[] = {
	{ AR5K_DCU_FP, 0x000003e0 },
	{ AR5K_SEQ_MASK, 0x0000000f },
	{ 0x809c, 0x00000000 },
	{ 0x80a0, 0x00000000 },
	{ AR5K_MIC_QOS_CTL, 0x00000000 },
	{ AR5K_MIC_QOS_SEL, 0x00000000 },
	{ AR5K_MISC_MODE, 0x00000000 },
	{ AR5K_OFDM_FIL_CNT, 0x00000000 },
	{ AR5K_CCK_FIL_CNT, 0x00000000 },
	{ AR5K_PHYERR_CNT1, 0x00000000 },
	{ AR5K_PHYERR_CNT1_MASK, 0x00000000 },
	{ AR5K_PHYERR_CNT2, 0x00000000 },
	{ AR5K_PHYERR_CNT2_MASK, 0x00000000 },
	{ AR5K_TSF_THRES, 0x00000000 },
	{ 0x8140, 0x800003f9 },
	{ 0x8144, 0x00000000 },
	{ AR5K_PHY_AGC, 0x00000000 },
	{ AR5K_PHY_ADC_CTL, 0x0000a000 },
	{ 0x983c, 0x00200400 },
	{ AR5K_PHY_GAIN_OFFSET, 0x1284233c },
	{ AR5K_PHY_SCR, 0x0000001f },
	{ AR5K_PHY_SLMT, 0x00000080 },
	{ AR5K_PHY_SCAL, 0x0000000e },
	{ 0x9958, 0x00081fff },
	{ AR5K_PHY_TIMING_7, 0x00000000 },
	{ AR5K_PHY_TIMING_8, 0x02800000 },
	{ AR5K_PHY_TIMING_11, 0x00000000 },
	{ 0x99dc, 0xfebadbe8 },
	{ AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
	{ 0x99e4, 0xaaaaaaaa },
	{ 0x99e8, 0x3c466478 },
	{ 0x99ec, 0x000000aa },
	{ AR5K_PHY_SCLOCK, 0x0000000c },
	{ AR5K_PHY_SDELAY, 0x000000ff },
	{ AR5K_PHY_SPENDING, 0x00000014 },
	{ AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
	{ AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
	{ AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
	{ 0xa23c, 0x93c889af },
	{ AR5K_PHY_FAST_ADC, 0x00000001 },
	{ 0xa250, 0x0000a000 },
	{ AR5K_PHY_BLUETOOTH, 0x00000000 },
	{ AR5K_PHY_TPC_RG1, 0x0cc75380 },
	{ 0xa25c, 0x0f0f0f01 },
	{ 0xa260, 0x5f690f01 },
	{ 0xa264, 0x00418a11 },
	{ 0xa268, 0x00000000 },
	{ AR5K_PHY_TPC_RG5, 0x0c30c166 },
	{ 0xa270, 0x00820820 },
	{ 0xa274, 0x081a3caa },
	{ 0xa278, 0x1ce739ce },
	{ 0xa27c, 0x051701ce },
	{ 0xa300, 0x16010000 },
	{ 0xa304, 0x2c032402 },
	{ 0xa308, 0x48433e42 },
	{ 0xa30c, 0x5a0f500b },
	{ 0xa310, 0x6c4b624a },
	{ 0xa314, 0x7e8b748a },
	{ 0xa318, 0x96cf8ccb },
	{ 0xa31c, 0xa34f9d0f },
	{ 0xa320, 0xa7cfa58f },
	{ 0xa348, 0x3fffffff },
	{ 0xa34c, 0x3fffffff },
	{ 0xa350, 0x3fffffff },
	{ 0xa354, 0x0003ffff },
	{ 0xa358, 0x79a8aa1f },
	{ 0xa35c, 0x066c420f },
	{ 0xa360, 0x0f282207 },
	{ 0xa364, 0x17601685 },
	{ 0xa368, 0x1f801104 },
	{ 0xa36c, 0x37a00c03 },
	{ 0xa370, 0x3fc40883 },
	{ 0xa374, 0x57c00803 },
	{ 0xa378, 0x5fd80682 },
	{ 0xa37c, 0x7fe00482 },
	{ 0xa380, 0x7f3c7bba },
	{ 0xa384, 0xf3307ff0 },
};

/*
 * Initial BaseBand Gain settings for RF5111/5112 (AR5210 comes with
 * RF5110 only so initial BB Gain settings are included in AR5K_AR5210_INI)
 */

/* RF5111 Initial BaseBand Gain settings */
static const struct ath5k_ini rf5111_ini_bbgain[] = {
	{ AR5K_BB_GAIN(0), 0x00000000 },
	{ AR5K_BB_GAIN(1), 0x00000020 },
	{
AR5K_BB_GAIN(2), 0x00000010 }, { AR5K_BB_GAIN(3), 0x00000030 }, { AR5K_BB_GAIN(4), 0x00000008 }, { AR5K_BB_GAIN(5), 0x00000028 }, { AR5K_BB_GAIN(6), 0x00000004 }, { AR5K_BB_GAIN(7), 0x00000024 }, { AR5K_BB_GAIN(8), 0x00000014 }, { AR5K_BB_GAIN(9), 0x00000034 }, { AR5K_BB_GAIN(10), 0x0000000c }, { AR5K_BB_GAIN(11), 0x0000002c }, { AR5K_BB_GAIN(12), 0x00000002 }, { AR5K_BB_GAIN(13), 0x00000022 }, { AR5K_BB_GAIN(14), 0x00000012 }, { AR5K_BB_GAIN(15), 0x00000032 }, { AR5K_BB_GAIN(16), 0x0000000a }, { AR5K_BB_GAIN(17), 0x0000002a }, { AR5K_BB_GAIN(18), 0x00000006 }, { AR5K_BB_GAIN(19), 0x00000026 }, { AR5K_BB_GAIN(20), 0x00000016 }, { AR5K_BB_GAIN(21), 0x00000036 }, { AR5K_BB_GAIN(22), 0x0000000e }, { AR5K_BB_GAIN(23), 0x0000002e }, { AR5K_BB_GAIN(24), 0x00000001 }, { AR5K_BB_GAIN(25), 0x00000021 }, { AR5K_BB_GAIN(26), 0x00000011 }, { AR5K_BB_GAIN(27), 0x00000031 }, { AR5K_BB_GAIN(28), 0x00000009 }, { AR5K_BB_GAIN(29), 0x00000029 }, { AR5K_BB_GAIN(30), 0x00000005 }, { AR5K_BB_GAIN(31), 0x00000025 }, { AR5K_BB_GAIN(32), 0x00000015 }, { AR5K_BB_GAIN(33), 0x00000035 }, { AR5K_BB_GAIN(34), 0x0000000d }, { AR5K_BB_GAIN(35), 0x0000002d }, { AR5K_BB_GAIN(36), 0x00000003 }, { AR5K_BB_GAIN(37), 0x00000023 }, { AR5K_BB_GAIN(38), 0x00000013 }, { AR5K_BB_GAIN(39), 0x00000033 }, { AR5K_BB_GAIN(40), 0x0000000b }, { AR5K_BB_GAIN(41), 0x0000002b }, { AR5K_BB_GAIN(42), 0x0000002b }, { AR5K_BB_GAIN(43), 0x0000002b }, { AR5K_BB_GAIN(44), 0x0000002b }, { AR5K_BB_GAIN(45), 0x0000002b }, { AR5K_BB_GAIN(46), 0x0000002b }, { AR5K_BB_GAIN(47), 0x0000002b }, { AR5K_BB_GAIN(48), 0x0000002b }, { AR5K_BB_GAIN(49), 0x0000002b }, { AR5K_BB_GAIN(50), 0x0000002b }, { AR5K_BB_GAIN(51), 0x0000002b }, { AR5K_BB_GAIN(52), 0x0000002b }, { AR5K_BB_GAIN(53), 0x0000002b }, { AR5K_BB_GAIN(54), 0x0000002b }, { AR5K_BB_GAIN(55), 0x0000002b }, { AR5K_BB_GAIN(56), 0x0000002b }, { AR5K_BB_GAIN(57), 0x0000002b }, { AR5K_BB_GAIN(58), 0x0000002b }, { AR5K_BB_GAIN(59), 0x0000002b }, { AR5K_BB_GAIN(60), 0x0000002b }, { 
AR5K_BB_GAIN(61), 0x0000002b }, { AR5K_BB_GAIN(62), 0x00000002 }, { AR5K_BB_GAIN(63), 0x00000016 }, }; /* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414+) */ static const struct ath5k_ini rf5112_ini_bbgain[] = { { AR5K_BB_GAIN(0), 0x00000000 }, { AR5K_BB_GAIN(1), 0x00000001 }, { AR5K_BB_GAIN(2), 0x00000002 }, { AR5K_BB_GAIN(3), 0x00000003 }, { AR5K_BB_GAIN(4), 0x00000004 }, { AR5K_BB_GAIN(5), 0x00000005 }, { AR5K_BB_GAIN(6), 0x00000008 }, { AR5K_BB_GAIN(7), 0x00000009 }, { AR5K_BB_GAIN(8), 0x0000000a }, { AR5K_BB_GAIN(9), 0x0000000b }, { AR5K_BB_GAIN(10), 0x0000000c }, { AR5K_BB_GAIN(11), 0x0000000d }, { AR5K_BB_GAIN(12), 0x00000010 }, { AR5K_BB_GAIN(13), 0x00000011 }, { AR5K_BB_GAIN(14), 0x00000012 }, { AR5K_BB_GAIN(15), 0x00000013 }, { AR5K_BB_GAIN(16), 0x00000014 }, { AR5K_BB_GAIN(17), 0x00000015 }, { AR5K_BB_GAIN(18), 0x00000018 }, { AR5K_BB_GAIN(19), 0x00000019 }, { AR5K_BB_GAIN(20), 0x0000001a }, { AR5K_BB_GAIN(21), 0x0000001b }, { AR5K_BB_GAIN(22), 0x0000001c }, { AR5K_BB_GAIN(23), 0x0000001d }, { AR5K_BB_GAIN(24), 0x00000020 }, { AR5K_BB_GAIN(25), 0x00000021 }, { AR5K_BB_GAIN(26), 0x00000022 }, { AR5K_BB_GAIN(27), 0x00000023 }, { AR5K_BB_GAIN(28), 0x00000024 }, { AR5K_BB_GAIN(29), 0x00000025 }, { AR5K_BB_GAIN(30), 0x00000028 }, { AR5K_BB_GAIN(31), 0x00000029 }, { AR5K_BB_GAIN(32), 0x0000002a }, { AR5K_BB_GAIN(33), 0x0000002b }, { AR5K_BB_GAIN(34), 0x0000002c }, { AR5K_BB_GAIN(35), 0x0000002d }, { AR5K_BB_GAIN(36), 0x00000030 }, { AR5K_BB_GAIN(37), 0x00000031 }, { AR5K_BB_GAIN(38), 0x00000032 }, { AR5K_BB_GAIN(39), 0x00000033 }, { AR5K_BB_GAIN(40), 0x00000034 }, { AR5K_BB_GAIN(41), 0x00000035 }, { AR5K_BB_GAIN(42), 0x00000035 }, { AR5K_BB_GAIN(43), 0x00000035 }, { AR5K_BB_GAIN(44), 0x00000035 }, { AR5K_BB_GAIN(45), 0x00000035 }, { AR5K_BB_GAIN(46), 0x00000035 }, { AR5K_BB_GAIN(47), 0x00000035 }, { AR5K_BB_GAIN(48), 0x00000035 }, { AR5K_BB_GAIN(49), 0x00000035 }, { AR5K_BB_GAIN(50), 0x00000035 }, { AR5K_BB_GAIN(51), 0x00000035 }, { 
AR5K_BB_GAIN(52), 0x00000035 }, { AR5K_BB_GAIN(53), 0x00000035 }, { AR5K_BB_GAIN(54), 0x00000035 }, { AR5K_BB_GAIN(55), 0x00000035 }, { AR5K_BB_GAIN(56), 0x00000035 }, { AR5K_BB_GAIN(57), 0x00000035 }, { AR5K_BB_GAIN(58), 0x00000035 }, { AR5K_BB_GAIN(59), 0x00000035 }, { AR5K_BB_GAIN(60), 0x00000035 }, { AR5K_BB_GAIN(61), 0x00000035 }, { AR5K_BB_GAIN(62), 0x00000010 }, { AR5K_BB_GAIN(63), 0x0000001a }, }; /** * ath5k_hw_ini_registers() - Write initial register dump common for all modes * @ah: The &struct ath5k_hw * @size: Dump size * @ini_regs: The array of &struct ath5k_ini * @skip_pcu: Skip PCU registers */ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini *ini_regs, bool skip_pcu) { unsigned int i; /* Write initial registers */ for (i = 0; i < size; i++) { /* Skip PCU registers if * requested */ if (skip_pcu && ini_regs[i].ini_register >= AR5K_PCU_MIN && ini_regs[i].ini_register <= AR5K_PCU_MAX) continue; switch (ini_regs[i].ini_mode) { case AR5K_INI_READ: /* Cleared on read */ ath5k_hw_reg_read(ah, ini_regs[i].ini_register); break; case AR5K_INI_WRITE: default: AR5K_REG_WAIT(i); ath5k_hw_reg_write(ah, ini_regs[i].ini_value, ini_regs[i].ini_register); } } } /** * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump * @ah: The &struct ath5k_hw * @size: Dump size * @ini_mode: The array of &struct ath5k_ini_mode * @mode: One of enum ath5k_driver_mode */ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, unsigned int size, const struct ath5k_ini_mode *ini_mode, u8 mode) { unsigned int i; for (i = 0; i < size; i++) { AR5K_REG_WAIT(i); ath5k_hw_reg_write(ah, ini_mode[i].mode_value[mode], (u32)ini_mode[i].mode_register); } } /** * ath5k_hw_write_initvals() - Write initial chip-specific register dump * @ah: The &struct ath5k_hw * @mode: One of enum ath5k_driver_mode * @skip_pcu: Skip PCU registers * * Write initial chip-specific register dump, to get the chipset on a * clean and ready-to-work 
state after warm reset. */
int
ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
	/*
	 * Write initial register settings:
	 * chip-generation table first, then radio-chip specific
	 * mode/common tables, then the baseband gain table.
	 * NOTE(review): ordering of these writes follows the vendor init
	 * sequence and must not be rearranged.
	 */

	/* For AR5212 and compatible */
	if (ah->ah_version == AR5K_AR5212) {

		/* First set of mode-specific settings */
		ath5k_hw_ini_mode_registers(ah,
				ARRAY_SIZE(ar5212_ini_mode_start),
				ar5212_ini_mode_start, mode);

		/*
		 * Write initial settings common for all modes
		 */
		ath5k_hw_ini_registers(ah,
				ARRAY_SIZE(ar5212_ini_common_start),
				ar5212_ini_common_start, skip_pcu);

		/* Second set of mode-specific settings, per radio chip */
		switch (ah->ah_radio) {
		case AR5K_RF5111:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf5111_ini_mode_end),
					rf5111_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5111_ini_common_end),
					rf5111_ini_common_end, skip_pcu);

			/* Baseband gain table */
			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5111_ini_bbgain),
					rf5111_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF5112:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf5112_ini_mode_end),
					rf5112_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_common_end),
					rf5112_ini_common_end, skip_pcu);

			/* Baseband gain table */
			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF5413:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf5413_ini_mode_end),
					rf5413_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5413_ini_common_end),
					rf5413_ini_common_end, skip_pcu);

			/* RF5413 reuses the RF5112 baseband gain table */
			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);

			break;
		case AR5K_RF2316:
		case AR5K_RF2413:

			/* RF2316 shares the RF2413 tables, with overrides
			 * applied below */
			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf2413_ini_mode_end),
					rf2413_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf2413_ini_common_end),
					rf2413_ini_common_end, skip_pcu);

			/* Override settings from rf2413_ini_common_end */
			if (ah->ah_radio == AR5K_RF2316) {
				ath5k_hw_reg_write(ah, 0x00004000,
						AR5K_PHY_AGC);
				ath5k_hw_reg_write(ah, 0x081b7caa, 0xa274);
			}

			/* RF2413/2316 reuse the RF5112 baseband gain table */
			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);
			break;
		case AR5K_RF2317:

			/* RF2317 uses RF2413 mode settings but RF2425
			 * common settings, again with overrides below */
			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf2413_ini_mode_end),
					rf2413_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf2425_ini_common_end),
					rf2425_ini_common_end, skip_pcu);

			/* Override settings from rf2413_ini_mode_end */
			ath5k_hw_reg_write(ah, 0x00180a65, AR5K_PHY_GAIN);

			/* Override settings from rf2413_ini_common_end */
			ath5k_hw_reg_write(ah, 0x00004000, AR5K_PHY_AGC);
			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TPC_RG5,
				AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP, 0xa);
			ath5k_hw_reg_write(ah, 0x800000a8, 0x8140);
			ath5k_hw_reg_write(ah, 0x000000ff, 0x9958);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);
			break;
		case AR5K_RF2425:

			ath5k_hw_ini_mode_registers(ah,
					ARRAY_SIZE(rf2425_ini_mode_end),
					rf2425_ini_mode_end, mode);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf2425_ini_common_end),
					rf2425_ini_common_end, skip_pcu);

			ath5k_hw_ini_registers(ah,
					ARRAY_SIZE(rf5112_ini_bbgain),
					rf5112_ini_bbgain, skip_pcu);
			break;
		default:
			/* Unknown radio revision: refuse rather than write
			 * a wrong init sequence to the hardware */
			return -EINVAL;
		}

	/* For AR5211 */
	} else if (ah->ah_version == AR5K_AR5211) {

		/* AR5K_MODE_11B */
		if (mode > 2) {
			ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
			return -EINVAL;
		}

		/* Mode-specific settings */
		ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(ar5211_ini_mode),
				ar5211_ini_mode, mode);

		/*
		 * Write initial settings common for all modes
		 */
		ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini),
				ar5211_ini, skip_pcu);

		/* AR5211 only comes with 5111 */

		/* Baseband gain table */
		ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain),
				rf5111_ini_bbgain, skip_pcu);

	/* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */
	} else if (ah->ah_version == AR5K_AR5210) {

		ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini),
				ar5210_ini, skip_pcu);
	}

	return 0;
}
gpl-2.0
whdgmawkd/furnace_kk_lge_msm8974
arch/alpha/kernel/sys_alcor.c
9020
7714
/*
 *	linux/arch/alpha/kernel/sys_alcor.c
 *
 *	Copyright (C) 1995 David A Rusling
 *	Copyright (C) 1996 Jay A Estabrook
 *	Copyright (C) 1998, 1999 Richard Henderson
 *
 * Code supporting the ALCOR and XLT (XL-300/366/433).
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"


/* Note mask bit is true for ENABLED irqs.  Shadow of the GRU interrupt
   mask register; bit N corresponds to irq 16+N.  */
static unsigned long cached_irq_mask;

/* Push the cached mask out to the GRU interrupt mask register.  */
static inline void
alcor_update_irq_hw(unsigned long mask)
{
	*(vuip)GRU_INT_MASK = mask;
	mb();
}

static inline void
alcor_enable_irq(struct irq_data *d)
{
	alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}

static void
alcor_disable_irq(struct irq_data *d)
{
	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}

static void
alcor_mask_and_ack_irq(struct irq_data *d)
{
	alcor_disable_irq(d);

	/* On ALCOR/XLT, need to dismiss interrupt via GRU:
	   write the bit, then clear the register again.  */
	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
	*(vuip)GRU_INT_CLEAR = 0; mb();
}

static void
alcor_isa_mask_and_ack_irq(struct irq_data *d)
{
	i8259a_mask_and_ack_irq(d);

	/* On ALCOR/XLT, need to dismiss interrupt via GRU.
	   Bit 31 is the cascaded ISA/EISA interrupt.  */
	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
	*(vuip)GRU_INT_CLEAR = 0; mb();
}

static struct irq_chip alcor_irq_type = {
	.name		= "ALCOR",
	.irq_unmask	= alcor_enable_irq,
	.irq_mask	= alcor_disable_irq,
	.irq_mask_ack	= alcor_mask_and_ack_irq,
};

/* Dispatch one summarized GRU interrupt: handle every pending bit,
   routing bit 31 to the ISA cascade and the rest to irq 16+bit.  */
static void
alcor_device_interrupt(unsigned long vector)
{
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary register of the GRU */
	pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS;

	/*
	 * Now for every possible bit set, work through them and call
	 * the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);
		pld &= pld - 1; /* clear least bit set */
		if (i == 31) {
			isa_device_interrupt(vector);
		} else {
			handle_irq(16 + i);
		}
	}
}

static void __init
alcor_init_irq(void)
{
	long i;

	if (alpha_using_srm)
		alpha_mv.device_interrupt = srm_device_interrupt;

	*(vuip)GRU_INT_MASK = 0; mb();			/* all disabled */
	*(vuip)GRU_INT_EDGE = 0; mb();			/* all are level */
	*(vuip)GRU_INT_HILO = 0x80000000U; mb();	/* ISA only HI */
	*(vuip)GRU_INT_CLEAR = 0; mb();			/* all clear */

	for (i = 16; i < 48; ++i) {
		/* On Alcor, at least, lines 20..30 are not connected
		   and can generate spurious interrupts if we turn them
		   on while IRQ probing.  */
		if (i >= 16+20 && i <= 16+30)
			continue;
		irq_set_chip_and_handler(i, &alcor_irq_type,
					 handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;

	init_i8259a_irqs();
	common_init_isa_dma();

	setup_irq(16+31, &isa_cascade_irqaction);
}


/*
 * PCI Fixup configuration.
 *
 * Summary @ GRU_INT_REQ:
 * Bit      Meaning
 * 0        Interrupt Line A from slot 2
 * 1        Interrupt Line B from slot 2
 * 2        Interrupt Line C from slot 2
 * 3        Interrupt Line D from slot 2
 * 4        Interrupt Line A from slot 1
 * 5        Interrupt Line B from slot 1
 * 6        Interrupt Line C from slot 1
 * 7        Interrupt Line D from slot 1
 * 8        Interrupt Line A from slot 0
 * 9        Interrupt Line B from slot 0
 *10        Interrupt Line C from slot 0
 *11        Interrupt Line D from slot 0
 *12        Interrupt Line A from slot 4
 *13        Interrupt Line B from slot 4
 *14        Interrupt Line C from slot 4
 *15        Interrupt Line D from slot 4
 *16        Interrupt Line A from slot 3
 *17        Interrupt Line B from slot 3
 *18        Interrupt Line C from slot 3
 *19        Interrupt Line D from slot 3
 *20-30     Reserved
 *31        EISA interrupt
 *
 * The device to slot mapping looks like:
 *
 * Slot     Device
 *  6       built-in TULIP (XLT only)
 *  7       PCI on board slot 0
 *  8       PCI on board slot 3
 *  9       PCI on board slot 4
 * 10       PCEB (PCI-EISA bridge)
 * 11       PCI on board slot 2
 * 12       PCI on board slot 1
 *
 *
 * This two layered interrupt approach means that we allocate IRQ 16 and
 * above for PCI interrupts.  The IRQ relates to which bit the interrupt
 * comes in on.  This makes interrupt processing much easier.
 */

/* Map a PCI device's interrupt pin to a GRU irq (16+bit) using the
   per-IDSEL routing table above; -1 means no interrupt (PCEB).  */
static int __init
alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[7][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		/* note: IDSEL 17 is XLT only */
		{16+13, 16+13, 16+13, 16+13, 16+13},	/* IdSel 17,  TULIP  */
		{ 16+8,  16+8,  16+9, 16+10, 16+11},	/* IdSel 18,  slot 0 */
		{16+16, 16+16, 16+17, 16+18, 16+19},	/* IdSel 19,  slot 3 */
		{16+12, 16+12, 16+13, 16+14, 16+15},	/* IdSel 20,  slot 4 */
		{   -1,    -1,    -1,    -1,    -1},	/* IdSel 21,  PCEB   */
		{ 16+0,  16+0,  16+1,  16+2,  16+3},	/* IdSel 22,  slot 2 */
		{ 16+4,  16+4,  16+5,  16+6,  16+7},	/* IdSel 23,  slot 1 */
	};
	const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}

static void
alcor_kill_arch(int mode)
{
	cia_kill_arch(mode);

#ifndef ALPHA_RESTORE_SRM_SETUP
	switch(mode) {
	case LINUX_REBOOT_CMD_RESTART:
		/* Who said DEC engineer's have no sense of humor? ;-) */
		if (alpha_using_srm) {
			*(vuip) GRU_RESET = 0x0000dead;
			mb();
		}
		break;
	case LINUX_REBOOT_CMD_HALT:
		break;
	case LINUX_REBOOT_CMD_POWER_OFF:
		break;
	}

	halt();
#endif
}

static void __init
alcor_init_pci(void)
{
	struct pci_dev *dev;

	cia_init_pci();

	/*
	 * Now we can look to see if we are really running on an XLT-type
	 * motherboard, by looking for a 21040 TULIP in slot 6, which is
	 * built into XLT and BRET/MAVERICK, but not available on ALCOR.
	 */
	dev = pci_get_device(PCI_VENDOR_ID_DEC,
			     PCI_DEVICE_ID_DEC_TULIP,
			     NULL);
	if (dev && dev->devfn == PCI_DEVFN(6,0)) {
		alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS;
		printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n",
		       __func__);
	}
	pci_dev_put(dev);
}


/*
 * The System Vectors
 */

struct alpha_machine_vector alcor_mv __initmv = {
	.vector_name		= "Alcor",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= alcor_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= alcor_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= alcor_init_pci,
	.kill_arch		= alcor_kill_arch,
	.pci_map_irq		= alcor_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .cia = {
		.gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS
	}}
};
ALIAS_MV(alcor)

struct alpha_machine_vector xlt_mv __initmv = {
	.vector_name		= "XLT",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= alcor_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= alcor_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= alcor_init_pci,
	.kill_arch		= alcor_kill_arch,
	.pci_map_irq		= alcor_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .cia = {
		.gru_int_req_bits = XLT_GRU_INT_REQ_BITS
	}}
};

/* No alpha_mv alias for XLT, since we compile it
   in unconditionally with ALCOR; setup_arch knows how to cope.  */
gpl-2.0
mathieudevos/linux_kernel_3.2.48
net/ipv4/tcp_scalable.c
10556
1448
/* Tom Kelly's Scalable TCP * * See http://www.deneholme.net/tom/scalable/ * * John Heffner <jheffner@sc.edu> */ #include <linux/module.h> #include <net/tcp.h> /* These factors derived from the recommended values in the aer: * .01 and and 7/8. We use 50 instead of 100 to account for * delayed ack. */ #define TCP_SCALABLE_AI_CNT 50U #define TCP_SCALABLE_MD_SCALE 3 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) { struct tcp_sock *tp = tcp_sk(sk); if (!tcp_is_cwnd_limited(sk, in_flight)) return; if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp); else tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)); } static u32 tcp_scalable_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); } static struct tcp_congestion_ops tcp_scalable __read_mostly = { .ssthresh = tcp_scalable_ssthresh, .cong_avoid = tcp_scalable_cong_avoid, .min_cwnd = tcp_reno_min_cwnd, .owner = THIS_MODULE, .name = "scalable", }; static int __init tcp_scalable_register(void) { return tcp_register_congestion_control(&tcp_scalable); } static void __exit tcp_scalable_unregister(void) { tcp_unregister_congestion_control(&tcp_scalable); } module_init(tcp_scalable_register); module_exit(tcp_scalable_unregister); MODULE_AUTHOR("John Heffner"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Scalable TCP");
gpl-2.0
crystax/android-toolchain-gcc-5
gcc/testsuite/gcc.dg/vect/costmodel/x86_64/costmodel-vect-31.c
61
1366
/* { dg-require-effective-target vect_int } */ #include <stdarg.h> #include "../../tree-vect.h" #define N 32 struct t{ int k[N]; int l; }; struct s{ char a; /* aligned */ char b[N-1]; /* unaligned (offset 1B) */ char c[N]; /* aligned (offset NB) */ struct t d; /* aligned (offset 2NB) */ struct t e; /* unaligned (offset 2N+4N+4 B) */ }; struct s tmp = { 1 }; int main1 () { int i; /* unaligned */ for (i = 0; i < N/2; i++) { tmp.b[i] = 5; } /* check results: */ for (i = 0; i <N/2; i++) { if (tmp.b[i] != 5) abort (); } /* aligned */ for (i = 0; i < N/2; i++) { tmp.c[i] = 6; } /* check results: */ for (i = 0; i <N/2; i++) { if (tmp.c[i] != 6) abort (); } /* aligned */ for (i = 0; i < N/2; i++) { tmp.d.k[i] = 7; } /* check results: */ for (i = 0; i <N/2; i++) { if (tmp.d.k[i] != 7) abort (); } /* unaligned */ for (i = 0; i < N/2; i++) { tmp.e.k[i] = 8; } /* check results: */ for (i = 0; i <N/2; i++) { if (tmp.e.k[i] != 8) abort (); } return 0; } int main (void) { check_vect (); return main1 (); } /* { dg-final { scan-tree-dump-times "vectorized 4 loops" 1 "vect" } } */ /* { dg-final { cleanup-tree-dump "vect" } } */
gpl-2.0
jmahler/linux-next
tools/perf/ui/hist.c
61
19481
#include <inttypes.h> #include <math.h> #include <linux/compiler.h> #include "../util/hist.h" #include "../util/util.h" #include "../util/sort.h" #include "../util/evsel.h" #include "../util/evlist.h" /* hist period print (hpp) functions */ #define hpp__call_print_fn(hpp, fn, fmt, ...) \ ({ \ int __ret = fn(hpp, fmt, ##__VA_ARGS__); \ advance_hpp(hpp, __ret); \ __ret; \ }) static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, const char *fmt, int len, hpp_snprint_fn print_fn, bool fmt_percent) { int ret; struct hists *hists = he->hists; struct perf_evsel *evsel = hists_to_evsel(hists); char *buf = hpp->buf; size_t size = hpp->size; if (fmt_percent) { double percent = 0.0; u64 total = hists__total_period(hists); if (total) percent = 100.0 * get_field(he) / total; ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent); } else ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he)); if (perf_evsel__is_group_event(evsel)) { int prev_idx, idx_delta; struct hist_entry *pair; int nr_members = evsel->nr_members; prev_idx = perf_evsel__group_idx(evsel); list_for_each_entry(pair, &he->pairs.head, pairs.node) { u64 period = get_field(pair); u64 total = hists__total_period(pair->hists); if (!total) continue; evsel = hists_to_evsel(pair->hists); idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1; while (idx_delta--) { /* * zero-fill group members in the middle which * have no sample */ if (fmt_percent) { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 0.0); } else { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 0ULL); } } if (fmt_percent) { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 100.0 * period / total); } else { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, period); } prev_idx = perf_evsel__group_idx(evsel); } idx_delta = nr_members - prev_idx - 1; while (idx_delta--) { /* * zero-fill group members at last which have no sample */ if (fmt_percent) { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 
0.0); } else { ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 0ULL); } } } /* * Restore original buf and size as it's where caller expects * the result will be saved. */ hpp->buf = buf; hpp->size = size; return ret; } int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) { int len = fmt->user_len ?: fmt->len; if (symbol_conf.field_sep) { return __hpp__fmt(hpp, he, get_field, fmtstr, 1, print_fn, fmt_percent); } if (fmt_percent) len -= 2; /* 2 for a space and a % sign */ else len -= 1; return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent); } int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he, hpp_field_fn get_field, const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) { if (!symbol_conf.cumulate_callchain) { int len = fmt->user_len ?: fmt->len; return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A"); } return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent); } static int field_cmp(u64 field_a, u64 field_b) { if (field_a > field_b) return 1; if (field_a < field_b) return -1; return 0; } static int __hpp__sort(struct hist_entry *a, struct hist_entry *b, hpp_field_fn get_field) { s64 ret; int i, nr_members; struct perf_evsel *evsel; struct hist_entry *pair; u64 *fields_a, *fields_b; ret = field_cmp(get_field(a), get_field(b)); if (ret || !symbol_conf.event_group) return ret; evsel = hists_to_evsel(a->hists); if (!perf_evsel__is_group_event(evsel)) return ret; nr_members = evsel->nr_members; fields_a = calloc(nr_members, sizeof(*fields_a)); fields_b = calloc(nr_members, sizeof(*fields_b)); if (!fields_a || !fields_b) goto out; list_for_each_entry(pair, &a->pairs.head, pairs.node) { evsel = hists_to_evsel(pair->hists); fields_a[perf_evsel__group_idx(evsel)] = get_field(pair); } list_for_each_entry(pair, &b->pairs.head, pairs.node) { evsel = 
hists_to_evsel(pair->hists); fields_b[perf_evsel__group_idx(evsel)] = get_field(pair); } for (i = 1; i < nr_members; i++) { ret = field_cmp(fields_a[i], fields_b[i]); if (ret) break; } out: free(fields_a); free(fields_b); return ret; } static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, hpp_field_fn get_field) { s64 ret = 0; if (symbol_conf.cumulate_callchain) { /* * Put caller above callee when they have equal period. */ ret = field_cmp(get_field(a), get_field(b)); if (ret) return ret; if (a->thread != b->thread || !symbol_conf.use_callchain) return 0; ret = b->callchain->max_depth - a->callchain->max_depth; if (callchain_param.order == ORDER_CALLER) ret = -ret; } return ret; } static int hpp__width_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp __maybe_unused, struct hists *hists) { int len = fmt->user_len ?: fmt->len; struct perf_evsel *evsel = hists_to_evsel(hists); if (symbol_conf.event_group) len = max(len, evsel->nr_members * fmt->len); if (len < (int)strlen(fmt->name)) len = strlen(fmt->name); return len; } static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hists *hists, int line __maybe_unused, int *span __maybe_unused) { int len = hpp__width_fn(fmt, hpp, hists); return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name); } int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) { va_list args; ssize_t ssize = hpp->size; double percent; int ret, len; va_start(args, fmt); len = va_arg(args, int); percent = va_arg(args, double); ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent); va_end(args); return (ret >= ssize) ? (ssize - 1) : ret; } static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) { va_list args; ssize_t ssize = hpp->size; int ret; va_start(args, fmt); ret = vsnprintf(hpp->buf, hpp->size, fmt, args); va_end(args); return (ret >= ssize) ? 
(ssize - 1) : ret; } #define __HPP_COLOR_PERCENT_FN(_type, _field) \ static u64 he_get_##_field(struct hist_entry *he) \ { \ return he->stat._field; \ } \ \ static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ hpp_color_scnprintf, true); \ } #define __HPP_ENTRY_PERCENT_FN(_type, _field) \ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ hpp_entry_scnprintf, true); \ } #define __HPP_SORT_FN(_type, _field) \ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ struct hist_entry *a, struct hist_entry *b) \ { \ return __hpp__sort(a, b, he_get_##_field); \ } #define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ static u64 he_get_acc_##_field(struct hist_entry *he) \ { \ return he->stat_acc->_field; \ } \ \ static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ hpp_color_scnprintf, true); \ } #define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ hpp_entry_scnprintf, true); \ } #define __HPP_SORT_ACC_FN(_type, _field) \ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ struct hist_entry *a, struct hist_entry *b) \ { \ return __hpp__sort_acc(a, b, he_get_acc_##_field); \ } #define __HPP_ENTRY_RAW_FN(_type, _field) \ static u64 he_get_raw_##_field(struct hist_entry *he) \ { \ return he->stat._field; \ } \ \ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ struct perf_hpp *hpp, struct hist_entry *he) \ { \ return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \ hpp_entry_scnprintf, 
false); \ } #define __HPP_SORT_RAW_FN(_type, _field) \ static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ struct hist_entry *a, struct hist_entry *b) \ { \ return __hpp__sort(a, b, he_get_raw_##_field); \ } #define HPP_PERCENT_FNS(_type, _field) \ __HPP_COLOR_PERCENT_FN(_type, _field) \ __HPP_ENTRY_PERCENT_FN(_type, _field) \ __HPP_SORT_FN(_type, _field) #define HPP_PERCENT_ACC_FNS(_type, _field) \ __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ __HPP_SORT_ACC_FN(_type, _field) #define HPP_RAW_FNS(_type, _field) \ __HPP_ENTRY_RAW_FN(_type, _field) \ __HPP_SORT_RAW_FN(_type, _field) HPP_PERCENT_FNS(overhead, period) HPP_PERCENT_FNS(overhead_sys, period_sys) HPP_PERCENT_FNS(overhead_us, period_us) HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys) HPP_PERCENT_FNS(overhead_guest_us, period_guest_us) HPP_PERCENT_ACC_FNS(overhead_acc, period) HPP_RAW_FNS(samples, nr_events) HPP_RAW_FNS(period, period) static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused, struct hist_entry *a __maybe_unused, struct hist_entry *b __maybe_unused) { return 0; } static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a) { return a->header == hpp__header_fn; } static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) { if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b)) return false; return a->idx == b->idx; } #define HPP__COLOR_PRINT_FNS(_name, _fn, _idx) \ { \ .name = _name, \ .header = hpp__header_fn, \ .width = hpp__width_fn, \ .color = hpp__color_ ## _fn, \ .entry = hpp__entry_ ## _fn, \ .cmp = hpp__nop_cmp, \ .collapse = hpp__nop_cmp, \ .sort = hpp__sort_ ## _fn, \ .idx = PERF_HPP__ ## _idx, \ .equal = hpp__equal, \ } #define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx) \ { \ .name = _name, \ .header = hpp__header_fn, \ .width = hpp__width_fn, \ .color = hpp__color_ ## _fn, \ .entry = hpp__entry_ ## _fn, \ .cmp = hpp__nop_cmp, \ .collapse = hpp__nop_cmp, \ .sort = hpp__sort_ ## _fn, \ 
.idx = PERF_HPP__ ## _idx, \ .equal = hpp__equal, \ } #define HPP__PRINT_FNS(_name, _fn, _idx) \ { \ .name = _name, \ .header = hpp__header_fn, \ .width = hpp__width_fn, \ .entry = hpp__entry_ ## _fn, \ .cmp = hpp__nop_cmp, \ .collapse = hpp__nop_cmp, \ .sort = hpp__sort_ ## _fn, \ .idx = PERF_HPP__ ## _idx, \ .equal = hpp__equal, \ } struct perf_hpp_fmt perf_hpp__format[] = { HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD), HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS), HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US), HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS), HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US), HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC), HPP__PRINT_FNS("Samples", samples, SAMPLES), HPP__PRINT_FNS("Period", period, PERIOD) }; struct perf_hpp_list perf_hpp_list = { .fields = LIST_HEAD_INIT(perf_hpp_list.fields), .sorts = LIST_HEAD_INIT(perf_hpp_list.sorts), .nr_header_lines = 1, }; #undef HPP__COLOR_PRINT_FNS #undef HPP__COLOR_ACC_PRINT_FNS #undef HPP__PRINT_FNS #undef HPP_PERCENT_FNS #undef HPP_PERCENT_ACC_FNS #undef HPP_RAW_FNS #undef __HPP_HEADER_FN #undef __HPP_WIDTH_FN #undef __HPP_COLOR_PERCENT_FN #undef __HPP_ENTRY_PERCENT_FN #undef __HPP_COLOR_ACC_PERCENT_FN #undef __HPP_ENTRY_ACC_PERCENT_FN #undef __HPP_ENTRY_RAW_FN #undef __HPP_SORT_FN #undef __HPP_SORT_ACC_FN #undef __HPP_SORT_RAW_FN void perf_hpp__init(void) { int i; for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { struct perf_hpp_fmt *fmt = &perf_hpp__format[i]; INIT_LIST_HEAD(&fmt->list); /* sort_list may be linked by setup_sorting() */ if (fmt->sort_list.next == NULL) INIT_LIST_HEAD(&fmt->sort_list); } /* * If user specified field order, no need to setup default fields. 
*/ if (is_strict_order(field_order)) return; if (symbol_conf.cumulate_callchain) { hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC); perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self"; } hpp_dimension__add_output(PERF_HPP__OVERHEAD); if (symbol_conf.show_cpu_utilization) { hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS); hpp_dimension__add_output(PERF_HPP__OVERHEAD_US); if (perf_guest) { hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS); hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US); } } if (symbol_conf.show_nr_samples) hpp_dimension__add_output(PERF_HPP__SAMPLES); if (symbol_conf.show_total_period) hpp_dimension__add_output(PERF_HPP__PERIOD); } void perf_hpp_list__column_register(struct perf_hpp_list *list, struct perf_hpp_fmt *format) { list_add_tail(&format->list, &list->fields); } void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, struct perf_hpp_fmt *format) { list_add_tail(&format->sort_list, &list->sorts); } void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, struct perf_hpp_fmt *format) { list_add(&format->sort_list, &list->sorts); } void perf_hpp__column_unregister(struct perf_hpp_fmt *format) { list_del(&format->list); } void perf_hpp__cancel_cumulate(void) { struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp; if (is_strict_order(field_order)) return; ovh = &perf_hpp__format[PERF_HPP__OVERHEAD]; acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC]; perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) { if (acc->equal(acc, fmt)) { perf_hpp__column_unregister(fmt); continue; } if (ovh->equal(ovh, fmt)) fmt->name = "Overhead"; } } static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) { return a->equal && a->equal(a, b); } void perf_hpp__setup_output_field(struct perf_hpp_list *list) { struct perf_hpp_fmt *fmt; /* append sort keys to output field */ perf_hpp_list__for_each_sort_list(list, fmt) { struct perf_hpp_fmt *pos; /* skip sort-only fields ("sort_compute" in perf diff) */ if (!fmt->entry && 
!fmt->color) continue; perf_hpp_list__for_each_format(list, pos) { if (fmt_equal(fmt, pos)) goto next; } perf_hpp__column_register(fmt); next: continue; } } void perf_hpp__append_sort_keys(struct perf_hpp_list *list) { struct perf_hpp_fmt *fmt; /* append output fields to sort keys */ perf_hpp_list__for_each_format(list, fmt) { struct perf_hpp_fmt *pos; perf_hpp_list__for_each_sort_list(list, pos) { if (fmt_equal(fmt, pos)) goto next; } perf_hpp__register_sort_field(fmt); next: continue; } } static void fmt_free(struct perf_hpp_fmt *fmt) { if (fmt->free) fmt->free(fmt); } void perf_hpp__reset_output_field(struct perf_hpp_list *list) { struct perf_hpp_fmt *fmt, *tmp; /* reset output fields */ perf_hpp_list__for_each_format_safe(list, fmt, tmp) { list_del_init(&fmt->list); list_del_init(&fmt->sort_list); fmt_free(fmt); } /* reset sort keys */ perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) { list_del_init(&fmt->list); list_del_init(&fmt->sort_list); fmt_free(fmt); } } /* * See hists__fprintf to match the column widths */ unsigned int hists__sort_list_width(struct hists *hists) { struct perf_hpp_fmt *fmt; int ret = 0; bool first = true; struct perf_hpp dummy_hpp; hists__for_each_format(hists, fmt) { if (perf_hpp__should_skip(fmt, hists)) continue; if (first) first = false; else ret += 2; ret += fmt->width(fmt, &dummy_hpp, hists); } if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */ ret += 3 + BITS_PER_LONG / 4; return ret; } unsigned int hists__overhead_width(struct hists *hists) { struct perf_hpp_fmt *fmt; int ret = 0; bool first = true; struct perf_hpp dummy_hpp; hists__for_each_format(hists, fmt) { if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt)) break; if (first) first = false; else ret += 2; ret += fmt->width(fmt, &dummy_hpp, hists); } return ret; } void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists) { if (perf_hpp__is_sort_entry(fmt)) return perf_hpp__reset_sort_width(fmt, hists); if 
(perf_hpp__is_dynamic_entry(fmt)) return; BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX); switch (fmt->idx) { case PERF_HPP__OVERHEAD: case PERF_HPP__OVERHEAD_SYS: case PERF_HPP__OVERHEAD_US: case PERF_HPP__OVERHEAD_ACC: fmt->len = 8; break; case PERF_HPP__OVERHEAD_GUEST_SYS: case PERF_HPP__OVERHEAD_GUEST_US: fmt->len = 9; break; case PERF_HPP__SAMPLES: case PERF_HPP__PERIOD: fmt->len = 12; break; default: break; } } void hists__reset_column_width(struct hists *hists) { struct perf_hpp_fmt *fmt; struct perf_hpp_list_node *node; hists__for_each_format(hists, fmt) perf_hpp__reset_width(fmt, hists); /* hierarchy entries have their own hpp list */ list_for_each_entry(node, &hists->hpp_formats, list) { perf_hpp_list__for_each_format(&node->hpp, fmt) perf_hpp__reset_width(fmt, hists); } } void perf_hpp__set_user_width(const char *width_list_str) { struct perf_hpp_fmt *fmt; const char *ptr = width_list_str; perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { char *p; int len = strtol(ptr, &p, 10); fmt->user_len = len; if (*p == ',') ptr = p + 1; else break; } } static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt) { struct perf_hpp_list_node *node = NULL; struct perf_hpp_fmt *fmt_copy; bool found = false; bool skip = perf_hpp__should_skip(fmt, hists); list_for_each_entry(node, &hists->hpp_formats, list) { if (node->level == fmt->level) { found = true; break; } } if (!found) { node = malloc(sizeof(*node)); if (node == NULL) return -1; node->skip = skip; node->level = fmt->level; perf_hpp_list__init(&node->hpp); hists->nr_hpp_node++; list_add_tail(&node->list, &hists->hpp_formats); } fmt_copy = perf_hpp_fmt__dup(fmt); if (fmt_copy == NULL) return -1; if (!skip) node->skip = false; list_add_tail(&fmt_copy->list, &node->hpp.fields); list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts); return 0; } int perf_hpp__setup_hists_formats(struct perf_hpp_list *list, struct perf_evlist *evlist) { struct perf_evsel *evsel; struct perf_hpp_fmt *fmt; struct hists 
*hists; int ret; if (!symbol_conf.report_hierarchy) return 0; evlist__for_each_entry(evlist, evsel) { hists = evsel__hists(evsel); perf_hpp_list__for_each_sort_list(list, fmt) { if (perf_hpp__is_dynamic_entry(fmt) && !perf_hpp__defined_dynamic_entry(fmt, hists)) continue; ret = add_hierarchy_fmt(hists, fmt); if (ret < 0) return ret; } } return 0; }
gpl-2.0
maz-1/android_kernel_lge_msm8974
drivers/leds/leds-lp5521.c
61
47831
/* * LP5521 LED chip driver. * * Copyright (C) 2010 Nokia Corporation * * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/leds.h> #include <linux/leds-lp5521.h> #include <linux/workqueue.h> #include <linux/slab.h> #if defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W) #include <mach/board_lge.h> #include <linux/regulator/consumer.h> #include <linux/of_gpio.h> #endif #define LP5521_PROGRAM_LENGTH 32 /* in bytes */ #define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */ #define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */ #define LP5521_CMD_LOAD 0x15 
/* 00010101 */ #define LP5521_CMD_RUN 0x2a /* 00101010 */ #define LP5521_CMD_DIRECT 0x3f /* 00111111 */ #define LP5521_CMD_DISABLED 0x00 /* 00000000 */ /* Registers */ #define LP5521_REG_ENABLE 0x00 #define LP5521_REG_OP_MODE 0x01 #define LP5521_REG_R_PWM 0x02 #define LP5521_REG_G_PWM 0x03 #define LP5521_REG_B_PWM 0x04 #define LP5521_REG_R_CURRENT 0x05 #define LP5521_REG_G_CURRENT 0x06 #define LP5521_REG_B_CURRENT 0x07 #define LP5521_REG_CONFIG 0x08 #define LP5521_REG_R_CHANNEL_PC 0x09 #define LP5521_REG_G_CHANNEL_PC 0x0A #define LP5521_REG_B_CHANNEL_PC 0x0B #define LP5521_REG_STATUS 0x0C #define LP5521_REG_RESET 0x0D #define LP5521_REG_GPO 0x0E #define LP5521_REG_R_PROG_MEM 0x10 #define LP5521_REG_G_PROG_MEM 0x30 #define LP5521_REG_B_PROG_MEM 0x50 #define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM #define LP5521_PROG_MEM_SIZE 0x20 /* Base register to set LED current */ #define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT /* Base register to set the brightness */ #define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM /* Bits in ENABLE register */ #define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */ #define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */ #define LP5521_EXEC_RUN 0x2A #define LP5521_ENABLE_DEFAULT \ (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM) #define LP5521_ENABLE_RUN_PROGRAM \ (LP5521_ENABLE_DEFAULT | LP5521_EXEC_RUN) /* Status */ #define LP5521_EXT_CLK_USED 0x08 /* default R channel current register value */ #define LP5521_REG_R_CURR_DEFAULT 0xAF /* Current index max step */ #define PATTERN_CURRENT_INDEX_STEP_HAL 255 /* Pattern Mode */ #define PATTERN_OFF 0 #define PATTERN_BLINK_ON -1 /*GV DCM Felica Pattern Mode*/ #define PATTERN_FELICA_ON 101 #define PATTERN_GPS_ON 102 /*GK Favorite MissedNoit Pattern Mode*/ #define PATTERN_FAVORITE_MISSED_NOTI 14 /*GK ATT Power off charged Mode (charge complete brightness 50%)*/ #define PATTERN_CHARGING_COMPLETE_50 15 #define PATTERN_CHARGING_50 16 /* Program Commands */ #define CMD_SET_PWM 
0x40 #define CMD_WAIT_LSB 0x00 #define MAX_BLINK_TIME 60000 /* 60 sec */ enum lp5521_wait_type { LP5521_CYCLE_INVALID, LP5521_CYCLE_50ms, LP5521_CYCLE_100ms, LP5521_CYCLE_200ms, LP5521_CYCLE_500ms, LP5521_CYCLE_700ms, LP5521_CYCLE_920ms, LP5521_CYCLE_982ms, LP5521_CYCLE_MAX, }; struct lp5521_pattern_cmd { u8 r[LP5521_PROGRAM_LENGTH]; u8 g[LP5521_PROGRAM_LENGTH]; u8 b[LP5521_PROGRAM_LENGTH]; unsigned pc_r; unsigned pc_g; unsigned pc_b; }; struct lp5521_wait_param { unsigned cycle; unsigned limit; u8 cmd; }; static const struct lp5521_wait_param lp5521_wait_params[LP5521_CYCLE_MAX] = { [LP5521_CYCLE_50ms] = { .cycle = 50, .limit = 3000, .cmd = 0x43, }, [LP5521_CYCLE_100ms] = { .cycle = 100, .limit = 6000, .cmd = 0x46, }, [LP5521_CYCLE_200ms] = { .cycle = 200, .limit = 10000, .cmd = 0x4d, }, [LP5521_CYCLE_500ms] = { .cycle = 500, .limit = 30000, .cmd = 0x60, }, [LP5521_CYCLE_700ms] = { .cycle = 700, .limit = 40000, .cmd = 0x6d, }, [LP5521_CYCLE_920ms] = { .cycle = 920, .limit = 50000, .cmd = 0x7b, }, [LP5521_CYCLE_982ms] = { .cycle = 982, .limit = 60000, .cmd = 0x7f, }, }; #if defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) static struct lp5521_led_config lp5521_led_config[] = { { .name = "R", .chan_nr = 0, .led_current = 25, .max_current = 25, }, { .name = "G", .chan_nr = 1, .led_current = 120, .max_current = 120, }, { .name = "B", .chan_nr = 2, .led_current = 90, .max_current = 90, }, }; #elif defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W) /* for HW rev.1.0 or higher */ static 
struct lp5521_led_config lp5521_led_config[] = { { .name = "R", .chan_nr = 0, .led_current = 37, .max_current = 37, }, { .name = "G", .chan_nr = 1, .led_current = 180, .max_current = 180, }, { .name = "B", .chan_nr = 2, .led_current = 90, .max_current = 90, }, }; /* for HW rev.C */ static struct lp5521_led_config lp5521_led_config_rev_c[] = { { .name = "R", .chan_nr = 0, .led_current = 25, .max_current = 25, }, { .name = "G", .chan_nr = 1, .led_current = 180, .max_current = 180, }, { .name = "B", .chan_nr = 2, .led_current = 180, .max_current = 180, }, }; /* for HW rev.B or lower */ static struct lp5521_led_config lp5521_led_config_rev_b[] = { { .name = "R", .chan_nr = 0, .led_current = 37, .max_current = 37, }, { .name = "G", .chan_nr = 1, .led_current = 180, .max_current = 180, }, { .name = "B", .chan_nr = 2, .led_current = 180, .max_current = 180, }, }; #else static struct lp5521_led_config lp5521_led_config[] = { { .name = "R", .chan_nr = 0, .led_current = 180, .max_current = 180, }, { .name = "G", .chan_nr = 1, .led_current = 180, .max_current = 180, }, { .name = "B", .chan_nr = 2, .led_current = 180, .max_current = 180, }, }; #endif static struct lp5521_led_pattern board_led_patterns[] = { { /* ID_POWER_ON = 1 */ .r = mode1_red, .g = mode1_green, .b = mode1_blue, .size_r = ARRAY_SIZE(mode1_red), .size_g = ARRAY_SIZE(mode1_green), .size_b = ARRAY_SIZE(mode1_blue), }, { /* ID_LCD_ON = 2 */ .r = mode2_red, .g = mode2_green, .b = mode2_blue, .size_r = ARRAY_SIZE(mode2_red), .size_g = ARRAY_SIZE(mode2_green), .size_b = ARRAY_SIZE(mode2_blue), }, { /* ID_CHARGING = 3 */ .r = mode3_red, .size_r = ARRAY_SIZE(mode3_red), }, { /* ID_CHARGING_FULL = 4 */ .g = mode4_green, .size_g = ARRAY_SIZE(mode4_green), }, { /* ID_CALENDAR_REMIND = 5 */ .r = mode5_red, .g = mode5_green, .size_r = ARRAY_SIZE(mode5_red), .size_g = ARRAY_SIZE(mode5_green), }, { /* ID_POWER_OFF = 6 */ .r = mode6_red, .g = mode6_green, .b = mode6_blue, .size_r = ARRAY_SIZE(mode6_red), .size_g = 
ARRAY_SIZE(mode6_green), .size_b = ARRAY_SIZE(mode6_blue), }, { /* ID_MISSED_NOTI = 7 */ .r = mode7_red, .g = mode7_green, .b = mode7_blue, .size_r = ARRAY_SIZE(mode7_red), .size_g = ARRAY_SIZE(mode7_green), .size_b = ARRAY_SIZE(mode7_blue), }, #if defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W) /* for dummy pattern IDs (defined LGLedRecord.java) */ { /* ID_ALARM = 8 */ }, { /* ID_CALL_01 = 9 */ }, { /* ID_CALL_02 = 10 */ }, { /* ID_CALL_03 = 11 */ }, { /* ID_VOLUME_UP = 12 */ }, { /* ID_VOLUME_DOWN = 13 */ }, #endif { /* ID_FAVORITE_MISSED_NOTI = 14 */ .r = mode8_red, .g = mode8_green, .b = mode8_blue, .size_r = ARRAY_SIZE(mode8_red), .size_g = ARRAY_SIZE(mode8_green), .size_b = ARRAY_SIZE(mode8_blue), }, { /* CHARGING_100_FOR_ATT = 15 (use chargerlogo, only AT&T) */ .g = mode4_green_50, .size_g = ARRAY_SIZE(mode4_green_50), }, { /* CHARGING_FOR_ATT = 16 (use chargerlogo, only AT&T) */ .r = mode3_red_50, .size_r = ARRAY_SIZE(mode3_red_50), }, { /* ID_MISSED_NOTI_PINK = 17 */ .r = mode9_red, .g = mode9_green, .b = mode9_blue, .size_r = ARRAY_SIZE(mode9_red), .size_g = ARRAY_SIZE(mode9_green), .size_b = ARRAY_SIZE(mode9_blue), }, { /* ID_MISSED_NOTI_BLUE = 18 */ .r = mode10_red, .g = mode10_green, .b = mode10_blue, .size_r = ARRAY_SIZE(mode10_red), .size_g = ARRAY_SIZE(mode10_green), .size_b = ARRAY_SIZE(mode10_blue), }, { /* ID_MISSED_NOTI_ORANGE = 19 */ .r = mode11_red, .g = mode11_green, .b = mode11_blue, .size_r = 
ARRAY_SIZE(mode11_red), .size_g = ARRAY_SIZE(mode11_green), .size_b = ARRAY_SIZE(mode11_blue), }, { /* ID_MISSED_NOTI_YELLOW = 20 */ .r = mode12_red, .g = mode12_green, .b = mode12_blue, .size_r = ARRAY_SIZE(mode12_red), .size_g = ARRAY_SIZE(mode12_green), .size_b = ARRAY_SIZE(mode12_blue), }, /* for dummy pattern IDs (defined LGLedRecord.java) */ { /* ID_INCALL_PINK = 21 */ }, { /* ID_INCALL_BLUE = 22 */ }, { /* ID_INCALL_ORANGE = 23 */ }, { /* ID_INCALL_YELLOW = 24 */ }, { /* ID_INCALL_TURQUOISE = 25 */ }, { /* ID_INCALL_PURPLE = 26 */ }, { /* ID_INCALL_RED = 27 */ }, { /* ID_INCALL_LIME = 28 */ }, { /* ID_MISSED_NOTI_TURQUOISE = 29 */ .r = mode13_red, .g = mode13_green, .b = mode13_blue, .size_r = ARRAY_SIZE(mode13_red), .size_g = ARRAY_SIZE(mode13_green), .size_b = ARRAY_SIZE(mode13_blue), }, { /* ID_MISSED_NOTI_PURPLE = 30 */ .r = mode14_red, .g = mode14_green, .b = mode14_blue, .size_r = ARRAY_SIZE(mode14_red), .size_g = ARRAY_SIZE(mode14_green), .size_b = ARRAY_SIZE(mode14_blue), }, { /* ID_MISSED_NOTI_RED = 31 */ .r = mode15_red, .g = mode15_green, .b = mode15_blue, .size_r = ARRAY_SIZE(mode15_red), .size_g = ARRAY_SIZE(mode15_green), .size_b = ARRAY_SIZE(mode15_blue), }, { /* ID_MISSED_NOTI_LIME = 32 */ .r = mode16_red, .g = mode16_green, .b = mode16_blue, .size_r = ARRAY_SIZE(mode16_red), .size_g = ARRAY_SIZE(mode16_green), .size_b = ARRAY_SIZE(mode16_blue), }, { /* ID_NONE = 33 */ }, { /* ID_NONE = 34 */ }, { /* ID_INCALL = 35 */ .r = mode17_red, .g = mode17_green, .b = mode17_blue, .size_r = ARRAY_SIZE(mode17_red), .size_g = ARRAY_SIZE(mode17_green), .size_b = ARRAY_SIZE(mode17_blue), }, { /* ID_NONE = 36 */ }, { /* ID_URGENT_CALL_MISSED_NOTI = 37 */ .r = mode18_red, .g = mode18_green, .b = mode18_blue, .size_r = ARRAY_SIZE(mode18_red), .size_g = ARRAY_SIZE(mode18_green), .size_b = ARRAY_SIZE(mode18_blue), }, }; #define LP5521_CONFIGS (LP5521_PWM_HF | LP5521_PWRSAVE_EN | \ LP5521_CP_MODE_AUTO | \ LP5521_CLOCK_INT) struct lp5521_chip *chip; static void 
lp5521_enable(bool state)
{
	int ret = 0;

	LP5521_INFO_MSG("LP5521: [%s] state = %d\n", __func__, state);

	/* Nothing to toggle without a valid enable GPIO. */
	if (!gpio_is_valid(chip->rgb_led_en)) {
		pr_err("rgb_led_en gpio_request failed for %d ret=%d\n", chip->rgb_led_en, ret);
		return;
	}

	/* NOTE(review): the enable line is driven only on boards newer than
	 * EVB2 - presumably older revisions do not wire it up; confirm. */
	if (lge_get_board_revno() > HW_REV_EVB2) {
		if (state) {
			gpio_set_value(chip->rgb_led_en, 1);
			LP5521_INFO_MSG("LP5521: [%s] RGB_EN(gpio #%d) set to HIGH\n", __func__, chip->rgb_led_en);
		} else {
			gpio_set_value(chip->rgb_led_en, 0);
			LP5521_INFO_MSG("LP5521: [%s] RGB_EN(gpio #%d) set to LOW\n", __func__, chip->rgb_led_en);
		}
	}
	return;
}

/* Board-level platform data handed to the LP5521 core on probe. */
static struct lp5521_platform_data lp5521_pdata = {
	.led_config = lp5521_led_config,
	.num_channels = ARRAY_SIZE(lp5521_led_config),
	.clock_mode = LP5521_CLOCK_INT,
	.update_config = LP5521_CONFIGS,
	.patterns = board_led_patterns,
	.num_patterns = ARRAY_SIZE(board_led_patterns),
	.enable = lp5521_enable
};

/* container_of helpers: recover driver structs from embedded members. */
static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev)
{
	return container_of(cdev, struct lp5521_led, cdev);
}

static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine)
{
	/* engine->id is 1-based while engines[] is 0-based */
	return container_of(engine, struct lp5521_chip, engines[engine->id - 1]);
}

static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led)
{
	return container_of(led, struct lp5521_chip, leds[led->id]);
}

static void lp5521_led_brightness_work(struct work_struct *work);

/* Single-register write over SMBus. */
static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(client, reg, value);
}

/* Single-register read; returns 0 on success, -EIO on any bus error. */
static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
{
	s32 ret;

	ret = i2c_smbus_read_byte_data(client, reg);
	if (ret < 0)
		return -EIO;

	*buf = ret;
	return 0;
}

/*
 * Update OP_MODE bits belonging to one engine only; other engines' bits in
 * the shared register are read back and preserved.
 */
static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
{
	struct lp5521_chip *chip = engine_to_lp5521(engine);
	struct i2c_client *client = chip->client;
	int ret;
	u8 engine_state;

	/* Only transition between RUN and DIRECT mode are handled here */
	if (mode == LP5521_CMD_LOAD)
		return 0;

	if (mode ==
LP5521_CMD_DISABLED) mode = LP5521_CMD_DIRECT; ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state); if (ret < 0) return ret; /* set mode only for this engine */ engine_state &= ~(engine->engine_mask); mode &= engine->engine_mask; engine_state |= mode; return lp5521_write(client, LP5521_REG_OP_MODE, engine_state); } static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) { struct lp5521_chip *chip = engine_to_lp5521(eng); struct i2c_client *client = chip->client; int ret; int addr; u8 mode = 0; /* move current engine to direct mode and remember the state */ ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ usleep_range(1000, 2000); ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); /* For loading, all the engines to load mode */ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ usleep_range(1000, 2000); lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); /* Mode change requires min 500 us delay. 
1 - 2 ms with margin */ usleep_range(1000, 2000); addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; i2c_smbus_write_i2c_block_data(client, addr, LP5521_PROG_MEM_SIZE, pattern); ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode); return ret; } static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr) { return lp5521_write(chip->client, LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr, curr); } static void lp5521_init_engine(struct lp5521_chip *chip) { int i; for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { chip->engines[i].id = i + 1; chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2); chip->engines[i].prog_page = i; } } static int lp5521_configure(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); int ret; u8 cfg; lp5521_init_engine(chip); /* Set all PWMs to direct control mode */ ret = lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); cfg = chip->pdata->update_config ? : (LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT); ret |= lp5521_write(client, LP5521_REG_CONFIG, cfg); /* Initialize all channels PWM to zero -> leds off */ ret |= lp5521_write(client, LP5521_REG_R_PWM, 0); ret |= lp5521_write(client, LP5521_REG_G_PWM, 0); ret |= lp5521_write(client, LP5521_REG_B_PWM, 0); /* Set engines are set to run state when OP_MODE enables engines */ ret |= lp5521_write(client, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM); /* enable takes 500us. 
1 - 2 ms leaves some margin */ usleep_range(1000, 2000); return ret; } static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf) { int ret; u8 status; ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status); if (ret < 0) return ret; /* Check that ext clock is really in use if requested */ if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT) if ((status & LP5521_EXT_CLK_USED) == 0) return -EIO; return 0; } static void lp5521_set_brightness(struct led_classdev *cdev, enum led_brightness brightness) { struct lp5521_led *led = cdev_to_led(cdev); static unsigned long log_counter; led->brightness = (u8)brightness; if (log_counter++ % 100 == 0) { LP5521_INFO_MSG("[%s] brightness : %d", __func__, brightness); } schedule_work(&led->brightness_work); } static void lp5521_led_brightness_work(struct work_struct *work) { struct lp5521_led *led = container_of(work, struct lp5521_led, brightness_work); struct lp5521_chip *chip = led_to_lp5521(led); struct i2c_client *client = chip->client; mutex_lock(&chip->lock); lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr, led->brightness); mutex_unlock(&chip->lock); } /* Detect the chip by setting its ENABLE register and reading it back. */ static int lp5521_detect(struct i2c_client *client) { int ret; u8 buf = 0; ret = lp5521_write(client, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT); if (ret) return ret; /* enable takes 500us. 1 - 2 ms leaves some margin */ usleep_range(1000, 2000); ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); if (ret) return ret; if (buf != LP5521_ENABLE_DEFAULT) return -ENODEV; return 0; } /* Set engine mode and create appropriate sysfs attributes, if required. 
 */
static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
{
	int ret = 0;

	/* if in that mode already do nothing, except for run */
	if (mode == engine->mode && mode != LP5521_CMD_RUN)
		return 0;

	if (mode == LP5521_CMD_RUN) {
		ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
	} else if (mode == LP5521_CMD_LOAD) {
		/* entering LOAD requires passing through DISABLED first */
		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
		lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
	} else if (mode == LP5521_CMD_DISABLED) {
		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
	}

	/* mode is cached even when the register update above failed */
	engine->mode = mode;

	return ret;
}

/*
 * Parse an ASCII hex program ("40 ff 60 00 ...") from sysfs and load it
 * into the engine's program memory.  Returns len on success or a negative
 * errno on parse/load failure.  The engine must already be in LOAD mode.
 */
static int lp5521_do_store_load(struct lp5521_engine *engine,
				const char *buf, size_t len)
{
	struct lp5521_chip *chip = engine_to_lp5521(engine);
	struct i2c_client *client = chip->client;
	int ret, nrchars, offset = 0, i = 0;
	char c[3];
	unsigned cmd;
	u8 pattern[LP5521_PROGRAM_LENGTH] = {0};

	/* NOTE(review): len == 0 would make (len - 1) wrap to SIZE_MAX in the
	 * unsigned comparison below; sysfs stores normally guarantee len >= 1,
	 * but confirm before reusing this helper elsewhere. */
	while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
		/* separate sscanfs because length is working only for %s */
		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
		/* NOTE(review): %n is normally NOT counted in sscanf's return
		 * value, so this check may need to be "ret != 1" - the same
		 * defect was fixed upstream in leds-lp5523; verify against
		 * this kernel's vsscanf before changing. */
		if (ret != 2)
			goto fail;
		ret = sscanf(c, "%2x", &cmd);
		if (ret != 1)
			goto fail;
		pattern[i] = (u8)cmd;

		offset += nrchars;
		i++;
	}

	/* Each instruction is 16bit long.
Check that length is even */ if (i % 2) goto fail; mutex_lock(&chip->lock); if (engine->mode == LP5521_CMD_LOAD) ret = lp5521_load_program(engine, pattern); else ret = -EINVAL; mutex_unlock(&chip->lock); if (ret) { dev_err(&client->dev, "failed loading pattern\n"); return ret; } return len; fail: dev_err(&client->dev, "wrong pattern format\n"); return -EINVAL; } static ssize_t store_engine_load(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, int nr) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); return lp5521_do_store_load(&chip->engines[nr - 1], buf, len); } #define store_load(nr) \ static ssize_t store_engine##nr##_load(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t len) \ { \ return store_engine_load(dev, attr, buf, len, nr); \ } store_load(1) store_load(2) store_load(3) static ssize_t show_engine_mode(struct device *dev, struct device_attribute *attr, char *buf, int nr) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); switch (chip->engines[nr - 1].mode) { case LP5521_CMD_RUN: return sprintf(buf, "run\n"); case LP5521_CMD_LOAD: return sprintf(buf, "load\n"); case LP5521_CMD_DISABLED: return sprintf(buf, "disabled\n"); default: return sprintf(buf, "disabled\n"); } } #define show_mode(nr) \ static ssize_t show_engine##nr##_mode(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ return show_engine_mode(dev, attr, buf, nr); \ } show_mode(1) show_mode(2) show_mode(3) static ssize_t store_engine_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, int nr) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); struct lp5521_engine *engine = &chip->engines[nr - 1]; mutex_lock(&chip->lock); if (!strncmp(buf, "run", 3)) lp5521_set_mode(engine, LP5521_CMD_RUN); else if (!strncmp(buf, "load", 4)) 
lp5521_set_mode(engine, LP5521_CMD_LOAD); else if (!strncmp(buf, "disabled", 8)) lp5521_set_mode(engine, LP5521_CMD_DISABLED); mutex_unlock(&chip->lock); return len; } #define store_mode(nr) \ static ssize_t store_engine##nr##_mode(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t len) \ { \ return store_engine_mode(dev, attr, buf, len, nr); \ } store_mode(1) store_mode(2) store_mode(3) static ssize_t show_max_current(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lp5521_led *led = cdev_to_led(led_cdev); return sprintf(buf, "%d\n", led->max_current); } static ssize_t show_current(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lp5521_led *led = cdev_to_led(led_cdev); return sprintf(buf, "%d\n", led->led_current); } static ssize_t store_current(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lp5521_led *led = cdev_to_led(led_cdev); struct lp5521_chip *chip = led_to_lp5521(led); ssize_t ret; unsigned long curr; if (kstrtoul(buf, 0, &curr)) return -EINVAL; if (curr > led->max_current) return -EINVAL; mutex_lock(&chip->lock); ret = lp5521_set_led_current(chip, led->id, curr); mutex_unlock(&chip->lock); if (ret < 0) return ret; led->led_current = (u8)curr; LP5521_INFO_MSG("[%s] brightness : %d", __func__, (u8)curr); return len; } static ssize_t lp5521_selftest(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct lp5521_chip *chip = i2c_get_clientdata(client); int ret; mutex_lock(&chip->lock); ret = lp5521_run_selftest(chip, buf); mutex_unlock(&chip->lock); return sprintf(buf, "%s\n", ret ? 
"FAIL" : "OK"); } static void lp5521_clear_program_memory(struct i2c_client *cl) { int i; u8 rgb_mem[] = { LP5521_REG_R_PROG_MEM, LP5521_REG_G_PROG_MEM, LP5521_REG_B_PROG_MEM, }; for (i = 0; i < ARRAY_SIZE(rgb_mem); i++) { lp5521_write(cl, rgb_mem[i], 0); lp5521_write(cl, rgb_mem[i] + 1, 0); } } static void lp5521_write_program_memory(struct i2c_client *cl, u8 base, const u8 *rgb, int size) { int i; if (!rgb || size <= 0) return; for (i = 0; i < size; i++) lp5521_write(cl, base + i, *(rgb + i)); lp5521_write(cl, base + i, 0); lp5521_write(cl, base + i + 1, 0); } static inline struct lp5521_led_pattern *lp5521_get_pattern (struct lp5521_chip *chip, u8 offset) { struct lp5521_led_pattern *ptn; ptn = chip->pdata->patterns + (offset - 1); return ptn; } static void _run_led_pattern(struct lp5521_chip *chip, struct lp5521_led_pattern *ptn) { struct i2c_client *cl = chip->client; lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); usleep_range(1000, 2000); lp5521_clear_program_memory(cl); lp5521_write_program_memory(cl, LP5521_REG_R_PROG_MEM, ptn->r, ptn->size_r); lp5521_write_program_memory(cl, LP5521_REG_G_PROG_MEM, ptn->g, ptn->size_g); lp5521_write_program_memory(cl, LP5521_REG_B_PROG_MEM, ptn->b, ptn->size_b); lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_RUN); usleep_range(1000, 2000); lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_RUN_PROGRAM); } static void lp5521_run_led_pattern(int mode, struct lp5521_chip *chip) { struct lp5521_led_pattern *ptn; struct i2c_client *cl = chip->client; int num_patterns = chip->pdata->num_patterns; #if defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || 
defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W) if (mode >= 1000) { mode = mode - 1000; } #endif chip->id_pattern_play = mode; #if defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W) #if 0 /* this process is not need, because dummy pattern defined in board file */ if (mode == PATTERN_FAVORITE_MISSED_NOTI || mode == PATTERN_CHARGING_COMPLETE_50 || mode == PATTERN_CHARGING_50) { mode = num_patterns - (PATTERN_CHARGING_50 - mode); } #endif #endif if (mode > num_patterns || !(chip->pdata->patterns)) { chip->id_pattern_play = PATTERN_OFF; LP5521_INFO_MSG("[%s] invalid pattern!", __func__); return; } if (mode == PATTERN_OFF) { lp5521_write(cl, LP5521_REG_ENABLE, LP5521_ENABLE_DEFAULT); usleep_range(1000, 2000); lp5521_write(cl, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); LP5521_INFO_MSG("[%s] PATTERN_PLAY_OFF", __func__); } else { ptn = lp5521_get_pattern(chip, mode); if (!ptn) return; _run_led_pattern(chip, ptn); LP5521_INFO_MSG("[%s] PATTERN_PLAY_ON", __func__); } } static u8 get_led_current_value(u8 current_index) { return current_index_mapped_value[current_index]; } static ssize_t show_led_pattern(struct device *dev, struct device_attribute *attr, char *buf) { struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev)); return sprintf(buf, "%d\n", chip->id_pattern_play); } static ssize_t 
store_led_pattern(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev)); unsigned long val; int ret; LP5521_INFO_MSG("[%s] pattern id : %s", __func__, buf); ret = strict_strtoul(buf, 10, &val); if (ret) return ret; lp5521_run_led_pattern(val, chip); return len; } static ssize_t show_led_current_index(struct device *dev, struct device_attribute *attr, char *buf) { struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev)); if (!chip) return 0; return sprintf(buf, "%d\n", chip->current_index); } static ssize_t store_led_current_index(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev)); unsigned long val; int ret, i; u8 max_current, modify_current; LP5521_INFO_MSG("[%s] current index (0~255) : %s", __func__, buf); ret = strict_strtoul(buf, 10, &val); if (ret) return ret; if (val > PATTERN_CURRENT_INDEX_STEP_HAL || val < 0) return -EINVAL; if (!chip) return 0; #if defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W) LP5521_INFO_MSG("[%s] prevent change current_index", __func__); return 0; #endif chip->current_index = val; mutex_lock(&chip->lock); for (i = 0; i < LP5521_MAX_LEDS ; i++) { max_current = chip->leds[i].max_current; modify_current = get_led_current_value(val); if (modify_current > max_current) modify_current = max_current; 
LP5521_INFO_MSG("[%s] modify_current : %d", __func__, modify_current); ret = lp5521_set_led_current(chip, i, modify_current); if (ret) break; chip->leds[i].led_current = modify_current; } mutex_unlock(&chip->lock); if (ret) return ret; return len; } static void _set_pwm_cmd(struct lp5521_pattern_cmd *cmd, unsigned int color) { u8 r = (color >> 16) & 0xFF; u8 g = (color >> 8) & 0xFF; u8 b = color & 0xFF; cmd->r[cmd->pc_r++] = CMD_SET_PWM; cmd->r[cmd->pc_r++] = r; cmd->g[cmd->pc_g++] = CMD_SET_PWM; cmd->g[cmd->pc_g++] = g; cmd->b[cmd->pc_b++] = CMD_SET_PWM; cmd->b[cmd->pc_b++] = b; } static enum lp5521_wait_type _find_wait_cycle_type(unsigned int ms) { int i; for (i = LP5521_CYCLE_50ms ; i < LP5521_CYCLE_MAX ; i++) { if (ms > lp5521_wait_params[i-1].limit && ms <= lp5521_wait_params[i].limit) return i; } return LP5521_CYCLE_INVALID; } static void _set_wait_cmd(struct lp5521_pattern_cmd *cmd, unsigned int ms, u8 jump, unsigned int off) { enum lp5521_wait_type type = _find_wait_cycle_type(ms); unsigned int loop = ms / lp5521_wait_params[type].cycle; u8 cmd_msb = lp5521_wait_params[type].cmd; u8 msb; u8 lsb; u16 branch; WARN_ON(!cmd_msb); WARN_ON(loop > 64); if (off) { if (loop > 1) { if (loop > 128) loop = 128; lsb = ((loop-1) & 0xff) | 0x80; /* wait command */ cmd->r[cmd->pc_r++] = cmd_msb; cmd->r[cmd->pc_r++] = lsb; cmd->g[cmd->pc_g++] = cmd_msb; cmd->g[cmd->pc_g++] = lsb; cmd->b[cmd->pc_b++] = cmd_msb; cmd->b[cmd->pc_b++] = lsb; } else { /* wait command */ cmd->r[cmd->pc_r++] = cmd_msb; cmd->r[cmd->pc_r++] = CMD_WAIT_LSB; cmd->g[cmd->pc_g++] = cmd_msb; cmd->g[cmd->pc_g++] = CMD_WAIT_LSB; cmd->b[cmd->pc_b++] = cmd_msb; cmd->b[cmd->pc_b++] = CMD_WAIT_LSB; } } else { /* wait command */ cmd->r[cmd->pc_r++] = cmd_msb; cmd->r[cmd->pc_r++] = CMD_WAIT_LSB; cmd->g[cmd->pc_g++] = cmd_msb; cmd->g[cmd->pc_g++] = CMD_WAIT_LSB; cmd->b[cmd->pc_b++] = cmd_msb; cmd->b[cmd->pc_b++] = CMD_WAIT_LSB; /* branch command : if wait time is bigger than cycle msec, branch is used for command 
looping */ if (loop > 1) { branch = (5 << 13) | ((loop - 1) << 7) | jump; msb = (branch >> 8) & 0xFF; lsb = branch & 0xFF; cmd->r[cmd->pc_r++] = msb; cmd->r[cmd->pc_r++] = lsb; cmd->g[cmd->pc_g++] = msb; cmd->g[cmd->pc_g++] = lsb; cmd->b[cmd->pc_b++] = msb; cmd->b[cmd->pc_b++] = lsb; } } } static inline bool _is_pc_overflow(struct lp5521_led_pattern *ptn) { return (ptn->size_r >= LP5521_PROGRAM_LENGTH || ptn->size_g >= LP5521_PROGRAM_LENGTH || ptn->size_b >= LP5521_PROGRAM_LENGTH); } static ssize_t store_led_blink(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lp5521_chip *chip = i2c_get_clientdata(to_i2c_client(dev)); unsigned int rgb = 0; unsigned int on = 0; unsigned int off = 0; struct lp5521_led_pattern ptn = { }; struct lp5521_pattern_cmd cmd = { }; u8 jump_pc = 0; sscanf(buf, "0x%06x,%d,%d", &rgb, &on, &off); LP5521_INFO_MSG("[%s] rgb=0x%06x, on=%d, off=%d\n", __func__, rgb, on, off); lp5521_run_led_pattern(PATTERN_OFF, chip); on = min_t(unsigned int, on, MAX_BLINK_TIME); off = min_t(unsigned int, off, MAX_BLINK_TIME); if (!rgb || !on || !off) { chip->id_pattern_play = PATTERN_OFF; return len; } else { chip->id_pattern_play = PATTERN_BLINK_ON; } /* on */ _set_pwm_cmd(&cmd, rgb); _set_wait_cmd(&cmd, on, jump_pc, 0); jump_pc = cmd.pc_r / 2; /* 16bit size program counter */ /* off */ _set_pwm_cmd(&cmd, 0); _set_wait_cmd(&cmd, off, jump_pc, 1); ptn.r = cmd.r; ptn.size_r = cmd.pc_r; ptn.g = cmd.g; ptn.size_g = cmd.pc_g; ptn.b = cmd.b; ptn.size_b = cmd.pc_b; WARN_ON(_is_pc_overflow(&ptn)); _run_led_pattern(chip, &ptn); return len; } /* led class device attributes */ static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); static struct attribute *lp5521_led_attributes[] = { &dev_attr_led_current.attr, &dev_attr_max_current.attr, NULL, }; static struct attribute_group lp5521_led_attribute_group = { .attrs = lp5521_led_attributes }; /* 
device attributes */ static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, show_engine1_mode, store_engine1_mode); static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, show_engine2_mode, store_engine2_mode); static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, show_engine3_mode, store_engine3_mode); static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load); static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load); static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); static DEVICE_ATTR(led_pattern, S_IRUGO | S_IWUSR, show_led_pattern, store_led_pattern); static DEVICE_ATTR(led_blink, S_IRUGO | S_IWUSR, NULL, store_led_blink); static DEVICE_ATTR(led_current_index, S_IRUGO | S_IWUSR, show_led_current_index, store_led_current_index); static struct attribute *lp5521_attributes[] = { &dev_attr_engine1_mode.attr, &dev_attr_engine2_mode.attr, &dev_attr_engine3_mode.attr, &dev_attr_selftest.attr, &dev_attr_engine1_load.attr, &dev_attr_engine2_load.attr, &dev_attr_engine3_load.attr, &dev_attr_led_pattern.attr, &dev_attr_led_blink.attr, &dev_attr_led_current_index.attr, NULL }; static const struct attribute_group lp5521_group = { .attrs = lp5521_attributes, }; static int lp5521_register_sysfs(struct i2c_client *client) { struct device *dev = &client->dev; return sysfs_create_group(&dev->kobj, &lp5521_group); } static void lp5521_unregister_sysfs(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); struct device *dev = &client->dev; int i; sysfs_remove_group(&dev->kobj, &lp5521_group); for (i = 0; i < chip->num_leds; i++) sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, &lp5521_led_attribute_group); } static int lp5521_init_led(struct lp5521_led *led, struct i2c_client *client, int chan, struct lp5521_platform_data pdata) { struct device *dev = &client->dev; char name[32]; int res; if (chan >= LP5521_MAX_LEDS) return -EINVAL; if 
(lp5521_pdata.led_config[chan].led_current == 0) return 0; led->led_current = lp5521_pdata.led_config[chan].led_current; led->max_current = lp5521_pdata.led_config[chan].max_current; led->chan_nr = lp5521_pdata.led_config[chan].chan_nr; if (led->chan_nr >= LP5521_MAX_LEDS) { dev_err(dev, "Use channel numbers between 0 and %d\n", LP5521_MAX_LEDS - 1); return -EINVAL; } led->cdev.brightness_set = lp5521_set_brightness; if (lp5521_pdata.led_config[chan].name) { led->cdev.name = lp5521_pdata.led_config[chan].name; } else { snprintf(name, sizeof(name), "%s:channel%d", lp5521_pdata.label ?: client->name, chan); led->cdev.name = name; } res = led_classdev_register(dev, &led->cdev); if (res < 0) { dev_err(dev, "couldn't register led on channel %d\n", chan); return res; } res = sysfs_create_group(&led->cdev.dev->kobj, &lp5521_led_attribute_group); if (res < 0) { dev_err(dev, "couldn't register current attribute\n"); led_classdev_unregister(&led->cdev); return res; } return 0; } static int lp5521_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0, i, led; u8 buf = 0; LP5521_INFO_MSG("[%s] start\n", __func__); #ifdef CONFIG_OF if (&client->dev.of_node) { chip = devm_kzalloc(&client->dev, sizeof(struct lp5521_chip), GFP_KERNEL); if (!chip) { pr_err("%s: Failed to allocate memory\n", __func__); return -ENOMEM; } if (lge_get_board_revno() > HW_REV_EVB2) { chip->rgb_led_en = of_get_named_gpio(client->dev.of_node, "ti,led_en", 0); LP5521_INFO_MSG("chip->rgb_led_en ==%d \n", chip->rgb_led_en); if (!gpio_is_valid(chip->rgb_led_en)) { pr_err("Fail to get named gpio for rgb_led_en.\n"); goto fail0; } else { ret = gpio_request(chip->rgb_led_en, "rgb_led_en"); if (ret) { pr_err("request reset gpio failed, rc=%d\n", ret); gpio_free(chip->rgb_led_en); goto fail0; } } } } else { dev_err(&client->dev, "lp5521 probe of_node fail\n"); return -ENODEV; } #else chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) { LP5521_INFO_MSG("[%s] Can not 
allocate memory!\n", __func__); return -ENOMEM; } #endif i2c_set_clientdata(client, chip); chip->client = client; mutex_init(&chip->lock); #if defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W) if (lge_get_board_revno() < HW_REV_1_0) { lp5521_pdata.led_config = lp5521_led_config_rev_c; LP5521_INFO_MSG("[%s] lp5521_led_config_rev_c!\n", __func__); } else if (lge_get_board_revno() <= HW_REV_B) { lp5521_pdata.led_config = lp5521_led_config_rev_b; LP5521_INFO_MSG("[%s] lp5521_led_config_rev_b!\n", __func__); } else{ } #endif chip->pdata = &lp5521_pdata; gpio_set_value((chip->rgb_led_en), 0); usleep_range(1000, 2000); /* Keep enable down at least 1ms */ gpio_set_value((chip->rgb_led_en), 1); usleep_range(1000, 2000); /* Keep enable down at least 1ms */ lp5521_write(client, LP5521_REG_RESET, 0xff); usleep_range(10000, 20000); /* * Exact value is not available. 10 - 20ms * appears to be enough for reset. */ /* * Make sure that the chip is reset by reading back the r channel * current reg. This is dummy read is required on some platforms - * otherwise further access to the R G B channels in the * LP5521_REG_ENABLE register will not have any effect - strange! 
*/ ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf); if (ret || buf != LP5521_REG_R_CURR_DEFAULT) { dev_err(&client->dev, "error in resetting chip\n"); goto fail2; } usleep_range(10000, 20000); ret = lp5521_detect(client); if (ret) { dev_err(&client->dev, "Chip not found\n"); goto fail2; } dev_info(&client->dev, "%s programmable led chip found\n", id->name); ret = lp5521_configure(client); if (ret < 0) { dev_err(&client->dev, "error configuring chip\n"); goto fail1; } /* Initialize leds */ chip->num_channels = lp5521_pdata.num_channels; chip->num_leds = 0; led = 0; for (i = 0; i < lp5521_pdata.num_channels; i++) { /* Do not initialize channels that are not connected */ if (lp5521_pdata.led_config[i].led_current == 0) continue; ret = lp5521_init_led(&chip->leds[led], client, i, lp5521_pdata); if (ret) { dev_err(&client->dev, "error initializing leds\n"); goto fail2; } chip->num_leds++; chip->leds[led].id = led; /* Set initial LED current */ lp5521_set_led_current(chip, led, chip->leds[led].led_current); INIT_WORK(&(chip->leds[led].brightness_work), lp5521_led_brightness_work); led++; } /* Initialize current index for auto brightness (max step) */ chip->current_index = PATTERN_CURRENT_INDEX_STEP_HAL; ret = lp5521_register_sysfs(client); if (ret) { dev_err(&client->dev, "registering sysfs failed\n"); goto fail2; } #if !(defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W)) lp5521_run_led_pattern(1, chip); /* 1: Power On pattern 
number */ LP5521_INFO_MSG("[%s] pattern id : 1(Power on)", __func__); LP5521_INFO_MSG("[%s] complete\n", __func__); #endif return ret; fail2: for (i = 0; i < chip->num_leds; i++) { led_classdev_unregister(&chip->leds[i].cdev); cancel_work_sync(&chip->leds[i].brightness_work); } fail1: if (lge_get_board_revno() > HW_REV_EVB2) { if (lp5521_pdata.enable) lp5521_pdata.enable(0); } fail0: gpio_free(chip->rgb_led_en); return ret; } static int lp5521_remove(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); int i; LP5521_INFO_MSG("[%s] start\n", __func__); lp5521_run_led_pattern(PATTERN_OFF, chip); lp5521_unregister_sysfs(client); for (i = 0; i < chip->num_leds; i++) { led_classdev_unregister(&chip->leds[i].cdev); cancel_work_sync(&chip->leds[i].brightness_work); } if (lge_get_board_revno() > HW_REV_EVB2) { if (chip->pdata->enable) chip->pdata->enable(0); } if (chip->pdata->release_resources) chip->pdata->release_resources(); devm_kfree(&client->dev, chip); LP5521_INFO_MSG("[%s] complete\n", __func__); return 0; } static void lp5521_shutdown(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); int i; if (!chip) { LP5521_INFO_MSG("[%s] null pointer check!\n", __func__); return; } LP5521_INFO_MSG("[%s] start\n", __func__); lp5521_set_led_current(chip, 0, 0); lp5521_set_led_current(chip, 1, 0); lp5521_set_led_current(chip, 2, 0); lp5521_run_led_pattern(PATTERN_OFF, chip); lp5521_unregister_sysfs(client); for (i = 0; i < chip->num_leds; i++) { led_classdev_unregister(&chip->leds[i].cdev); cancel_work_sync(&chip->leds[i].brightness_work); } if (lge_get_board_revno() > HW_REV_EVB2) { if (chip->pdata->enable) chip->pdata->enable(0); } if (chip->pdata->release_resources) chip->pdata->release_resources(); devm_kfree(&client->dev, chip); LP5521_INFO_MSG("[%s] complete\n", __func__); } static int lp5521_suspend(struct i2c_client *client, pm_message_t mesg) { struct lp5521_chip *chip = i2c_get_clientdata(client); if 
(!chip || !chip->pdata) { LP5521_INFO_MSG("[%s] null pointer check!\n", __func__); return 0; } LP5521_INFO_MSG("[%s] id_pattern_play = %d\n", __func__, chip->id_pattern_play); if (lge_get_board_revno() > HW_REV_EVB2) { if (chip->pdata->enable && chip->id_pattern_play == PATTERN_OFF) { LP5521_INFO_MSG("[%s] RGB_EN set to LOW\n", __func__); chip->pdata->enable(0); } } return 0; } static int lp5521_resume(struct i2c_client *client) { struct lp5521_chip *chip = i2c_get_clientdata(client); int ret = 0; if (!chip || !chip->pdata) { LP5521_INFO_MSG("[%s] null pointer check!\n", __func__); return 0; } LP5521_INFO_MSG("[%s] id_pattern_play = %d\n", __func__, chip->id_pattern_play); if (lge_get_board_revno() > HW_REV_EVB2) { if (chip->pdata->enable && chip->id_pattern_play == PATTERN_OFF) { LP5521_INFO_MSG("[%s] RGB_EN set to HIGH\n", __func__); chip->pdata->enable(0); usleep_range(1000, 2000); /* Keep enable down at least 1ms */ chip->pdata->enable(1); usleep_range(1000, 2000); /* 500us abs min. 
*/ #if !(defined(CONFIG_MACH_MSM8974_G3_LGU) || defined(CONFIG_MACH_MSM8974_G3_SKT) || defined(CONFIG_MACH_MSM8974_G3_KT) || defined(CONFIG_MACH_MSM8974_G3_ATT) || defined(CONFIG_MACH_MSM8974_G3_VZW) || defined(CONFIG_MACH_MSM8974_G3_SPR_US) || defined(CONFIG_MACH_MSM8974_G3_USC_US) || defined(CONFIG_MACH_MSM8974_G3_ACG_US) || defined(CONFIG_MACH_MSM8974_G3_TMO_US) || defined(CONFIG_MACH_MSM8974_G3_GLOBAL_COM) || defined(CONFIG_MACH_MSM8974_G3_CN) || defined(CONFIG_MACH_MSM8974_G3_CA) || defined(CONFIG_MACH_MSM8974_G3_LRA) || defined(CONFIG_MACH_MSM8974_B1_KR) || defined(CONFIG_MACH_MSM8974_B1W)) lp5521_write(client, LP5521_REG_RESET, 0xff); usleep_range(10000, 20000); ret = lp5521_configure(client); if (ret < 0) { dev_err(&client->dev, "error configuring chip\n"); } #endif } } else { if (chip->id_pattern_play == PATTERN_OFF) { LP5521_INFO_MSG("[%s] RGB_EN set to HIGH\n", __func__); lp5521_write(client, LP5521_REG_RESET, 0xff); usleep_range(10000, 20000); ret = lp5521_configure(client); if (ret < 0) { dev_err(&client->dev, "error configuring chip\n"); } } } return ret; } static const struct i2c_device_id lp5521_id[] = { { "lp5521", 0 }, /* Three channel chip */ { } }; #ifdef CONFIG_OF static struct of_device_id lp5521_match_table[] = { { .compatible = "ti,lp5521",}, { }, }; #endif static struct i2c_driver lp5521_driver = { .driver = { .owner = THIS_MODULE, .name = "lp5521", #ifdef CONFIG_OF .of_match_table = lp5521_match_table, #endif }, .probe = lp5521_probe, .remove = lp5521_remove, .shutdown = lp5521_shutdown, .suspend = lp5521_suspend, .resume = lp5521_resume, .id_table = lp5521_id, }; /* * module load/unload record keeping */ static int __init lp5521_dev_init(void) { return i2c_add_driver(&lp5521_driver); } module_init(lp5521_dev_init); static void __exit lp5521_dev_exit(void) { i2c_del_driver(&lp5521_driver); } module_exit(lp5521_dev_exit); MODULE_DEVICE_TABLE(i2c, lp5521_id); MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo"); 
MODULE_DESCRIPTION("LP5521 LED engine"); MODULE_LICENSE("GPL v2");
gpl-2.0
dr4go/lg.p990.kernel
security/keys/key.c
573
24950
/* Basic authentication token and access key management * * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poison.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> #include <linux/user_namespace.h> #include "internal.h" static struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); struct rb_root key_user_tree; /* tree of quota records indexed by UID */ DEFINE_SPINLOCK(key_user_lock); unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */ unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */ unsigned int key_quota_maxkeys = 200; /* general key count quota */ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); static void key_cleanup(struct work_struct *work); static DECLARE_WORK(key_cleanup_task, key_cleanup); /* we serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); /* any key who's type gets unegistered will be re-typed to this */ static struct key_type key_type_dead = { .name = "dead", }; #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { printk("__key_check: key %p {%08x} should be {%08x}\n", key, key->magic, KEY_DEBUG_MAGIC); BUG(); } #endif /*****************************************************************************/ /* * get the key quota record for a user, allocating a new record if one doesn't * already exist */ struct key_user 
*key_user_lookup(uid_t uid, struct user_namespace *user_ns) { struct key_user *candidate = NULL, *user; struct rb_node *parent = NULL; struct rb_node **p; try_again: p = &key_user_tree.rb_node; spin_lock(&key_user_lock); /* search the tree for a user record with a matching UID */ while (*p) { parent = *p; user = rb_entry(parent, struct key_user, node); if (uid < user->uid) p = &(*p)->rb_left; else if (uid > user->uid) p = &(*p)->rb_right; else if (user_ns < user->user_ns) p = &(*p)->rb_left; else if (user_ns > user->user_ns) p = &(*p)->rb_right; else goto found; } /* if we get here, we failed to find a match in the tree */ if (!candidate) { /* allocate a candidate user record if we don't already have * one */ spin_unlock(&key_user_lock); user = NULL; candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); if (unlikely(!candidate)) goto out; /* the allocation may have scheduled, so we need to repeat the * search lest someone else added the record whilst we were * asleep */ goto try_again; } /* if we get here, then the user record still hadn't appeared on the * second pass - so we use the candidate record */ atomic_set(&candidate->usage, 1); atomic_set(&candidate->nkeys, 0); atomic_set(&candidate->nikeys, 0); candidate->uid = uid; candidate->user_ns = get_user_ns(user_ns); candidate->qnkeys = 0; candidate->qnbytes = 0; spin_lock_init(&candidate->lock); mutex_init(&candidate->cons_lock); rb_link_node(&candidate->node, parent, p); rb_insert_color(&candidate->node, &key_user_tree); spin_unlock(&key_user_lock); user = candidate; goto out; /* okay - we found a user record for this UID */ found: atomic_inc(&user->usage); spin_unlock(&key_user_lock); kfree(candidate); out: return user; } /* end key_user_lookup() */ /*****************************************************************************/ /* * dispose of a user structure */ void key_user_put(struct key_user *user) { if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { rb_erase(&user->node, &key_user_tree); 
spin_unlock(&key_user_lock); put_user_ns(user->user_ns); kfree(user); } } /* end key_user_put() */ /*****************************************************************************/ /* * assign a key the next unique serial number * - these are assigned randomly to avoid security issues through covert * channel problems */ static inline void key_alloc_serial(struct key *key) { struct rb_node *parent, **p; struct key *xkey; /* propose a random serial number and look for a hole for it in the * serial number tree */ do { get_random_bytes(&key->serial, sizeof(key->serial)); key->serial >>= 1; /* negative numbers are not permitted */ } while (key->serial < 3); spin_lock(&key_serial_lock); attempt_insertion: parent = NULL; p = &key_serial_tree.rb_node; while (*p) { parent = *p; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) p = &(*p)->rb_left; else if (key->serial > xkey->serial) p = &(*p)->rb_right; else goto serial_exists; } /* we've found a suitable hole - arrange for this key to occupy it */ rb_link_node(&key->serial_node, parent, p); rb_insert_color(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); return; /* we found a key with the proposed serial number - walk the tree from * that point looking for the next unused serial number */ serial_exists: for (;;) { key->serial++; if (key->serial < 3) { key->serial = 3; goto attempt_insertion; } parent = rb_next(parent); if (!parent) goto attempt_insertion; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) goto attempt_insertion; } } /* end key_alloc_serial() */ /*****************************************************************************/ /* * allocate a key of the specified type * - update the user's quota to reflect the existence of the key * - called from a key-type operation with key_types_sem read-locked by * key_create_or_update() * - this prevents unregistration of the key type * - upon return the key is as yet uninstantiated; the 
caller needs to either * instantiate the key or discard it before returning */ struct key *key_alloc(struct key_type *type, const char *desc, uid_t uid, gid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags) { struct key_user *user = NULL; struct key *key; size_t desclen, quotalen; int ret; key = ERR_PTR(-EINVAL); if (!desc || !*desc) goto error; desclen = strlen(desc) + 1; quotalen = desclen + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid, cred->user->user_ns); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { unsigned maxkeys = (uid == 0) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = (uid == 0) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { if (user->qnkeys + 1 >= maxkeys || user->qnbytes + quotalen >= maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } user->qnkeys++; user->qnbytes += quotalen; spin_unlock(&user->lock); } /* allocate and initialise the key and its description */ key = kmem_cache_alloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; if (desc) { key->description = kmemdup(desc, desclen, GFP_KERNEL); if (!key->description) goto no_memory_3; } atomic_set(&key->usage, 1); init_rwsem(&key->sem); key->type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; key->flags = 0; key->expiry = 0; key->payload.data = NULL; key->security = NULL; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; memset(&key->type_data, 0, sizeof(key->type_data)); #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif /* let the security module know about the key */ ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; /* publish the key by giving it a serial 
number */ atomic_inc(&user->nkeys); key_alloc_serial(key); error: return key; security_error: kfree(key->description); kmem_cache_free(key_jar, key); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); key = ERR_PTR(ret); goto error; no_memory_3: kmem_cache_free(key_jar, key); no_memory_2: if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); no_memory_1: key = ERR_PTR(-ENOMEM); goto error; no_quota: spin_unlock(&user->lock); key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; } /* end key_alloc() */ EXPORT_SYMBOL(key_alloc); /*****************************************************************************/ /* * reserve an amount of quota for the key's payload */ int key_payload_reserve(struct key *key, size_t datalen) { int delta = (int) datalen - key->datalen; int ret = 0; key_check(key); /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxbytes = (key->user->uid == 0) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); if (delta > 0 && (key->user->qnbytes + delta >= maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } else { key->user->qnbytes += delta; key->quotalen += delta; } spin_unlock(&key->user->lock); } /* change the recorded data length if that didn't generate an error */ if (ret == 0) key->datalen = datalen; return ret; } /* end key_payload_reserve() */ EXPORT_SYMBOL(key_payload_reserve); /*****************************************************************************/ /* * instantiate a key and link it into the target keyring atomically * - called with the target keyring's semaphore writelocked */ static int __key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { int ret, awaken; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* instantiate the key */ ret = key->type->instantiate(key, data, datalen); if (ret == 0) { /* mark the key as being instantiated */ atomic_inc(&key->user->nikeys); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; /* and link it into the destination keyring */ if (keyring) ret = __key_link(keyring, key); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } } mutex_unlock(&key_construction_mutex); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret; } /* end __key_instantiate_and_link() */ /*****************************************************************************/ /* * instantiate a key and link it into the target keyring atomically */ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { int ret; if (keyring) 
down_write(&keyring->sem); ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey); if (keyring) up_write(&keyring->sem); return ret; } /* end key_instantiate_and_link() */ EXPORT_SYMBOL(key_instantiate_and_link); /*****************************************************************************/ /* * negatively instantiate a key and link it into the target keyring atomically */ int key_negate_and_link(struct key *key, unsigned timeout, struct key *keyring, struct key *authkey) { struct timespec now; int ret, awaken; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) down_write(&keyring->sem); mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); set_bit(KEY_FLAG_NEGATIVE, &key->flags); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring) ret = __key_link(keyring, key); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring) up_write(&keyring->sem); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret; } /* end key_negate_and_link() */ EXPORT_SYMBOL(key_negate_and_link); /*****************************************************************************/ /* * do cleaning up in process context so that we don't have to disable * interrupts all over the place */ static void key_cleanup(struct work_struct *work) { struct rb_node *_n; struct key *key; go_again: /* look for a dead key in the tree */ spin_lock(&key_serial_lock); for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { key = rb_entry(_n, 
struct key, serial_node); if (atomic_read(&key->usage) == 0) goto found_dead_key; } spin_unlock(&key_serial_lock); return; found_dead_key: /* we found a dead key - once we've removed it from the tree, we can * drop the lock */ rb_erase(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); key_check(key); security_key_free(key); /* deal with the user's key tracking and quota */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); key_user_put(key->user); /* now throw away the key memory */ if (key->type->destroy) key->type->destroy(key); kfree(key->description); #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC_X; #endif kmem_cache_free(key_jar, key); /* there may, of course, be more than one key to destroy */ goto go_again; } /* end key_cleanup() */ /*****************************************************************************/ /* * dispose of a reference to a key * - when all the references are gone, we schedule the cleanup task to come and * pull it out of the tree in definite process context */ void key_put(struct key *key) { if (key) { key_check(key); if (atomic_dec_and_test(&key->usage)) schedule_work(&key_cleanup_task); } } /* end key_put() */ EXPORT_SYMBOL(key_put); /*****************************************************************************/ /* * find a key by its serial number */ struct key *key_lookup(key_serial_t id) { struct rb_node *n; struct key *key; spin_lock(&key_serial_lock); /* search the tree for the specified key */ n = key_serial_tree.rb_node; while (n) { key = rb_entry(n, struct key, serial_node); if (id < key->serial) n = n->rb_left; else if (id > key->serial) n = n->rb_right; else goto found; } not_found: key = ERR_PTR(-ENOKEY); goto error; found: /* pretend it doesn't exist if it is awaiting 
deletion */ if (atomic_read(&key->usage) == 0) goto not_found; /* this races with key_put(), but that doesn't matter since key_put() * doesn't actually change the key */ atomic_inc(&key->usage); error: spin_unlock(&key_serial_lock); return key; } /* end key_lookup() */ /*****************************************************************************/ /* * find and lock the specified key type against removal * - we return with the sem readlocked */ struct key_type *key_type_lookup(const char *type) { struct key_type *ktype; down_read(&key_types_sem); /* look up the key type to see if it's one of the registered kernel * types */ list_for_each_entry(ktype, &key_types_list, link) { if (strcmp(ktype->name, type) == 0) goto found_kernel_type; } up_read(&key_types_sem); ktype = ERR_PTR(-ENOKEY); found_kernel_type: return ktype; } /* end key_type_lookup() */ /*****************************************************************************/ /* * unlock a key type */ void key_type_put(struct key_type *ktype) { up_read(&key_types_sem); } /* end key_type_put() */ /*****************************************************************************/ /* * attempt to update an existing key * - the key has an incremented refcount * - we need to put the key if we get an error */ static inline key_ref_t __key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = key_permission(key_ref, KEY_WRITE); if (ret < 0) goto error; ret = -EEXIST; if (!key->type->update) goto error; down_write(&key->sem); ret = key->type->update(key, payload, plen); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); if (ret < 0) goto error; out: return key_ref; error: key_put(key); key_ref = ERR_PTR(ret); goto out; } /* end __key_update() */ /*****************************************************************************/ /* * search the 
specified keyring for a key of the same description; if one is * found, update it, otherwise add a new one */ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { const struct cred *cred = current_cred(); struct key_type *ktype; struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!ktype->match || !ktype->instantiate) goto error_2; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_2; down_write(&keyring->sem); /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_WRITE); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_3; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (ktype->update) { key_ref = __keyring_search_one(keyring_ref, ktype, description, 0); if (!IS_ERR(key_ref)) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR; if (ktype->read) perm |= KEY_POS_READ | KEY_USR_READ; if (ktype == &key_type_keyring || ktype->update) perm |= KEY_USR_WRITE; } /* allocate a new key */ key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_3; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, payload, plen, keyring, 
NULL); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_3; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_3: up_write(&keyring->sem); error_2: key_type_put(ktype); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ up_write(&keyring->sem); key_type_put(ktype); key_ref = __key_update(key_ref, payload, plen); goto error; } /* end key_create_or_update() */ EXPORT_SYMBOL(key_create_or_update); /*****************************************************************************/ /* * update a key */ int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_WRITE); if (ret < 0) goto error; /* attempt to update it if supported */ ret = -EOPNOTSUPP; if (key->type->update) { down_write(&key->sem); ret = key->type->update(key, payload, plen); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); up_write(&key->sem); } error: return ret; } /* end key_update() */ EXPORT_SYMBOL(key_update); /*****************************************************************************/ /* * revoke a key */ void key_revoke(struct key *key) { struct timespec now; time_t time; key_check(key); /* make sure no one's trying to change or use the key when we mark it * - we tell lockdep that we might nest because we might be revoking an * authorisation key whilst holding the sem on a key we've just * instantiated */ down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) && key->type->revoke) key->type->revoke(key); /* set the death time to no more than the expiry time */ now = current_kernel_time(); time = now.tv_sec; if (key->revoked_at == 0 || key->revoked_at > time) { key->revoked_at = time; key_schedule_gc(key->revoked_at + 
key_gc_delay); } up_write(&key->sem); } /* end key_revoke() */ EXPORT_SYMBOL(key_revoke); /*****************************************************************************/ /* * register a type of key */ int register_key_type(struct key_type *ktype) { struct key_type *p; int ret; ret = -EEXIST; down_write(&key_types_sem); /* disallow key types with the same name */ list_for_each_entry(p, &key_types_list, link) { if (strcmp(p->name, ktype->name) == 0) goto out; } /* store the type */ list_add(&ktype->link, &key_types_list); ret = 0; out: up_write(&key_types_sem); return ret; } /* end register_key_type() */ EXPORT_SYMBOL(register_key_type); /*****************************************************************************/ /* * unregister a type of key */ void unregister_key_type(struct key_type *ktype) { struct rb_node *_n; struct key *key; down_write(&key_types_sem); /* withdraw the key type */ list_del_init(&ktype->link); /* mark all the keys of this type dead */ spin_lock(&key_serial_lock); for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { key = rb_entry(_n, struct key, serial_node); if (key->type == ktype) { key->type = &key_type_dead; set_bit(KEY_FLAG_DEAD, &key->flags); } } spin_unlock(&key_serial_lock); /* make sure everyone revalidates their keys */ synchronize_rcu(); /* we should now be able to destroy the payloads of all the keys of * this type with impunity */ spin_lock(&key_serial_lock); for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { key = rb_entry(_n, struct key, serial_node); if (key->type == ktype) { if (ktype->destroy) ktype->destroy(key); memset(&key->payload, KEY_DESTROY, sizeof(key->payload)); } } spin_unlock(&key_serial_lock); up_write(&key_types_sem); key_schedule_gc(0); } /* end unregister_key_type() */ EXPORT_SYMBOL(unregister_key_type); /*****************************************************************************/ /* * initialise the key management stuff */ void __init key_init(void) { /* allocate a slab in which we can 
store keys */ key_jar = kmem_cache_create("key_jar", sizeof(struct key), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); /* add the special key types */ list_add_tail(&key_type_keyring.link, &key_types_list); list_add_tail(&key_type_dead.link, &key_types_list); list_add_tail(&key_type_user.link, &key_types_list); /* record the root user tracking */ rb_link_node(&root_key_user.node, NULL, &key_user_tree.rb_node); rb_insert_color(&root_key_user.node, &key_user_tree); } /* end key_init() */
gpl-2.0
virt2real/linux-davinci
security/selinux/selinuxfs.c
1341
43790
/* Updated: Karl MacMillan <kmacmillan@tresys.com> * * Added conditional policy language extensions * * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support for the policy capability bitmap * * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. * Copyright (C) 2003 - 2004 Tresys Technology, LLC * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. */ #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/string.h> #include <linux/security.h> #include <linux/major.h> #include <linux/seq_file.h> #include <linux/percpu.h> #include <linux/audit.h> #include <linux/uaccess.h> #include <linux/kobject.h> #include <linux/ctype.h> /* selinuxfs pseudo filesystem for exporting the security policy API. Based on the proc code and the fs/nfsd/nfsctl.c code. */ #include "flask.h" #include "avc.h" #include "avc_ss.h" #include "security.h" #include "objsec.h" #include "conditional.h" /* Policy capability filenames */ static char *policycap_names[] = { "network_peer_controls", "open_perms" }; unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; static int __init checkreqprot_setup(char *str) { unsigned long checkreqprot; if (!strict_strtoul(str, 0, &checkreqprot)) selinux_checkreqprot = checkreqprot ? 
1 : 0; return 1; } __setup("checkreqprot=", checkreqprot_setup); static DEFINE_MUTEX(sel_mutex); /* global data for booleans */ static struct dentry *bool_dir; static int bool_num; static char **bool_pending_names; static int *bool_pending_values; /* global data for classes */ static struct dentry *class_dir; static unsigned long last_class_ino; static char policy_opened; /* global data for policy capabilities */ static struct dentry *policycap_dir; /* Check whether a task is allowed to use a security operation. */ static int task_has_security(struct task_struct *tsk, u32 perms) { const struct task_security_struct *tsec; u32 sid = 0; rcu_read_lock(); tsec = __task_cred(tsk)->security; if (tsec) sid = tsec->sid; rcu_read_unlock(); if (!tsec) return -EACCES; return avc_has_perm(sid, SECINITSID_SECURITY, SECCLASS_SECURITY, perms, NULL); } enum sel_inos { SEL_ROOT_INO = 2, SEL_LOAD, /* load policy */ SEL_ENFORCE, /* get or set enforcing status */ SEL_CONTEXT, /* validate context */ SEL_ACCESS, /* compute access decision */ SEL_CREATE, /* compute create labeling decision */ SEL_RELABEL, /* compute relabeling decision */ SEL_USER, /* compute reachable user contexts */ SEL_POLICYVERS, /* return policy version for this kernel */ SEL_COMMIT_BOOLS, /* commit new boolean values */ SEL_MLS, /* return if MLS policy is enabled */ SEL_DISABLE, /* disable SELinux until next reboot */ SEL_MEMBER, /* compute polyinstantiation membership decision */ SEL_CHECKREQPROT, /* check requested protection, not kernel-applied one */ SEL_COMPAT_NET, /* whether to use old compat network packet controls */ SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */ SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */ SEL_STATUS, /* export current status using mmap() */ SEL_POLICY, /* allow userspace to read the in kernel policy */ SEL_INO_NEXT, /* The next inode number to use */ }; static unsigned long sel_last_ino = SEL_INO_NEXT - 1; #define SEL_INITCON_INO_OFFSET 
0x01000000 #define SEL_BOOL_INO_OFFSET 0x02000000 #define SEL_CLASS_INO_OFFSET 0x04000000 #define SEL_POLICYCAP_INO_OFFSET 0x08000000 #define SEL_INO_MASK 0x00ffffff #define TMPBUFLEN 12 static ssize_t sel_read_enforce(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char tmpbuf[TMPBUFLEN]; ssize_t length; length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_enforcing); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } #ifdef CONFIG_SECURITY_SELINUX_DEVELOP static ssize_t sel_write_enforce(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *page = NULL; ssize_t length; int new_value; length = -ENOMEM; if (count >= PAGE_SIZE) goto out; /* No partial writes. */ length = EINVAL; if (*ppos != 0) goto out; length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; if (new_value != selinux_enforcing) { length = task_has_security(current, SECURITY__SETENFORCE); if (length) goto out; audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS, "enforcing=%d old_enforcing=%d auid=%u ses=%u", new_value, selinux_enforcing, from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); selinux_enforcing = new_value; if (selinux_enforcing) avc_ss_reset(0); selnl_notify_setenforce(selinux_enforcing); selinux_status_update_setenforce(selinux_enforcing); } length = count; out: free_page((unsigned long) page); return length; } #else #define sel_write_enforce NULL #endif static const struct file_operations sel_enforce_ops = { .read = sel_read_enforce, .write = sel_write_enforce, .llseek = generic_file_llseek, }; static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char tmpbuf[TMPBUFLEN]; ssize_t length; ino_t ino = file_inode(filp)->i_ino; int handle_unknown = (ino == SEL_REJECT_UNKNOWN) 
? security_get_reject_unknown() : !security_get_allow_unknown(); length = scnprintf(tmpbuf, TMPBUFLEN, "%d", handle_unknown); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static const struct file_operations sel_handle_unknown_ops = { .read = sel_read_handle_unknown, .llseek = generic_file_llseek, }; static int sel_open_handle_status(struct inode *inode, struct file *filp) { struct page *status = selinux_kernel_status_page(); if (!status) return -ENOMEM; filp->private_data = status; return 0; } static ssize_t sel_read_handle_status(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct page *status = filp->private_data; BUG_ON(!status); return simple_read_from_buffer(buf, count, ppos, page_address(status), sizeof(struct selinux_kernel_status)); } static int sel_mmap_handle_status(struct file *filp, struct vm_area_struct *vma) { struct page *status = filp->private_data; unsigned long size = vma->vm_end - vma->vm_start; BUG_ON(!status); /* only allows one page from the head */ if (vma->vm_pgoff > 0 || size != PAGE_SIZE) return -EIO; /* disallow writable mapping */ if (vma->vm_flags & VM_WRITE) return -EPERM; /* disallow mprotect() turns it into writable */ vma->vm_flags &= ~VM_MAYWRITE; return remap_pfn_range(vma, vma->vm_start, page_to_pfn(status), size, vma->vm_page_prot); } static const struct file_operations sel_handle_status_ops = { .open = sel_open_handle_status, .read = sel_read_handle_status, .mmap = sel_mmap_handle_status, .llseek = generic_file_llseek, }; #ifdef CONFIG_SECURITY_SELINUX_DISABLE static ssize_t sel_write_disable(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *page = NULL; ssize_t length; int new_value; length = -ENOMEM; if (count >= PAGE_SIZE) goto out; /* No partial writes. 
*/ length = -EINVAL; if (*ppos != 0) goto out; length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; if (new_value) { length = selinux_disable(); if (length) goto out; audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS, "selinux=0 auid=%u ses=%u", from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); } length = count; out: free_page((unsigned long) page); return length; } #else #define sel_write_disable NULL #endif static const struct file_operations sel_disable_ops = { .write = sel_write_disable, .llseek = generic_file_llseek, }; static ssize_t sel_read_policyvers(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char tmpbuf[TMPBUFLEN]; ssize_t length; length = scnprintf(tmpbuf, TMPBUFLEN, "%u", POLICYDB_VERSION_MAX); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static const struct file_operations sel_policyvers_ops = { .read = sel_read_policyvers, .llseek = generic_file_llseek, }; /* declaration for sel_write_load */ static int sel_make_bools(void); static int sel_make_classes(void); static int sel_make_policycap(void); /* declaration for sel_make_class_dirs */ static struct dentry *sel_make_dir(struct dentry *dir, const char *name, unsigned long *ino); static ssize_t sel_read_mls(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char tmpbuf[TMPBUFLEN]; ssize_t length; length = scnprintf(tmpbuf, TMPBUFLEN, "%d", security_mls_enabled()); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static const struct file_operations sel_mls_ops = { .read = sel_read_mls, .llseek = generic_file_llseek, }; struct policy_load_memory { size_t len; void *data; }; static int sel_open_policy(struct inode *inode, struct file *filp) { struct policy_load_memory *plm = NULL; int rc; 
BUG_ON(filp->private_data); mutex_lock(&sel_mutex); rc = task_has_security(current, SECURITY__READ_POLICY); if (rc) goto err; rc = -EBUSY; if (policy_opened) goto err; rc = -ENOMEM; plm = kzalloc(sizeof(*plm), GFP_KERNEL); if (!plm) goto err; if (i_size_read(inode) != security_policydb_len()) { mutex_lock(&inode->i_mutex); i_size_write(inode, security_policydb_len()); mutex_unlock(&inode->i_mutex); } rc = security_read_policy(&plm->data, &plm->len); if (rc) goto err; policy_opened = 1; filp->private_data = plm; mutex_unlock(&sel_mutex); return 0; err: mutex_unlock(&sel_mutex); if (plm) vfree(plm->data); kfree(plm); return rc; } static int sel_release_policy(struct inode *inode, struct file *filp) { struct policy_load_memory *plm = filp->private_data; BUG_ON(!plm); policy_opened = 0; vfree(plm->data); kfree(plm); return 0; } static ssize_t sel_read_policy(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct policy_load_memory *plm = filp->private_data; int ret; mutex_lock(&sel_mutex); ret = task_has_security(current, SECURITY__READ_POLICY); if (ret) goto out; ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len); out: mutex_unlock(&sel_mutex); return ret; } static int sel_mmap_policy_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct policy_load_memory *plm = vma->vm_file->private_data; unsigned long offset; struct page *page; if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE)) return VM_FAULT_SIGBUS; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= roundup(plm->len, PAGE_SIZE)) return VM_FAULT_SIGBUS; page = vmalloc_to_page(plm->data + offset); get_page(page); vmf->page = page; return 0; } static struct vm_operations_struct sel_mmap_policy_ops = { .fault = sel_mmap_policy_fault, .page_mkwrite = sel_mmap_policy_fault, }; static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma) { if (vma->vm_flags & VM_SHARED) { /* do not allow mprotect to make mapping writable */ vma->vm_flags &= ~VM_MAYWRITE; 
if (vma->vm_flags & VM_WRITE) return -EACCES; } vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &sel_mmap_policy_ops; return 0; } static const struct file_operations sel_policy_ops = { .open = sel_open_policy, .read = sel_read_policy, .mmap = sel_mmap_policy, .release = sel_release_policy, .llseek = generic_file_llseek, }; static ssize_t sel_write_load(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { ssize_t length; void *data = NULL; mutex_lock(&sel_mutex); length = task_has_security(current, SECURITY__LOAD_POLICY); if (length) goto out; /* No partial writes. */ length = -EINVAL; if (*ppos != 0) goto out; length = -EFBIG; if (count > 64 * 1024 * 1024) goto out; length = -ENOMEM; data = vmalloc(count); if (!data) goto out; length = -EFAULT; if (copy_from_user(data, buf, count) != 0) goto out; length = security_load_policy(data, count); if (length) goto out; length = sel_make_bools(); if (length) goto out1; length = sel_make_classes(); if (length) goto out1; length = sel_make_policycap(); if (length) goto out1; length = count; out1: audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD, "policy loaded auid=%u ses=%u", from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); out: mutex_unlock(&sel_mutex); vfree(data); return length; } static const struct file_operations sel_load_ops = { .write = sel_write_load, .llseek = generic_file_llseek, }; static ssize_t sel_write_context(struct file *file, char *buf, size_t size) { char *canon = NULL; u32 sid, len; ssize_t length; length = task_has_security(current, SECURITY__CHECK_CONTEXT); if (length) goto out; length = security_context_to_sid(buf, size, &sid); if (length) goto out; length = security_sid_to_context(sid, &canon, &len); if (length) goto out; length = -ERANGE; if (len > SIMPLE_TRANSACTION_LIMIT) { printk(KERN_ERR "SELinux: %s: context size (%u) exceeds " "payload max\n", __func__, len); goto out; } memcpy(buf, canon, len); length = 
len; out: kfree(canon); return length; } static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char tmpbuf[TMPBUFLEN]; ssize_t length; length = scnprintf(tmpbuf, TMPBUFLEN, "%u", selinux_checkreqprot); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *page = NULL; ssize_t length; unsigned int new_value; length = task_has_security(current, SECURITY__SETCHECKREQPROT); if (length) goto out; length = -ENOMEM; if (count >= PAGE_SIZE) goto out; /* No partial writes. */ length = -EINVAL; if (*ppos != 0) goto out; length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; length = -EINVAL; if (sscanf(page, "%u", &new_value) != 1) goto out; selinux_checkreqprot = new_value ? 1 : 0; length = count; out: free_page((unsigned long) page); return length; } static const struct file_operations sel_checkreqprot_ops = { .read = sel_read_checkreqprot, .write = sel_write_checkreqprot, .llseek = generic_file_llseek, }; /* * Remaining nodes use transaction based IO methods like nfsd/nfsctl.c */ static ssize_t sel_write_access(struct file *file, char *buf, size_t size); static ssize_t sel_write_create(struct file *file, char *buf, size_t size); static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size); static ssize_t sel_write_user(struct file *file, char *buf, size_t size); static ssize_t sel_write_member(struct file *file, char *buf, size_t size); static ssize_t (*write_op[])(struct file *, char *, size_t) = { [SEL_ACCESS] = sel_write_access, [SEL_CREATE] = sel_write_create, [SEL_RELABEL] = sel_write_relabel, [SEL_USER] = sel_write_user, [SEL_MEMBER] = sel_write_member, [SEL_CONTEXT] = sel_write_context, }; static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t 
size, loff_t *pos) { ino_t ino = file_inode(file)->i_ino; char *data; ssize_t rv; if (ino >= ARRAY_SIZE(write_op) || !write_op[ino]) return -EINVAL; data = simple_transaction_get(file, buf, size); if (IS_ERR(data)) return PTR_ERR(data); rv = write_op[ino](file, data, size); if (rv > 0) { simple_transaction_set(file, rv); rv = size; } return rv; } static const struct file_operations transaction_ops = { .write = selinux_transaction_write, .read = simple_transaction_read, .release = simple_transaction_release, .llseek = generic_file_llseek, }; /* * payload - write methods * If the method has a response, the response should be put in buf, * and the length returned. Otherwise return 0 or and -error. */ static ssize_t sel_write_access(struct file *file, char *buf, size_t size) { char *scon = NULL, *tcon = NULL; u32 ssid, tsid; u16 tclass; struct av_decision avd; ssize_t length; length = task_has_security(current, SECURITY__COMPUTE_AV); if (length) goto out; length = -ENOMEM; scon = kzalloc(size + 1, GFP_KERNEL); if (!scon) goto out; length = -ENOMEM; tcon = kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; length = -EINVAL; if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) goto out; length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); if (length) goto out; length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); if (length) goto out; security_compute_av_user(ssid, tsid, tclass, &avd); length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%x %x %x %x %u %x", avd.allowed, 0xffffffff, avd.auditallow, avd.auditdeny, avd.seqno, avd.flags); out: kfree(tcon); kfree(scon); return length; } static ssize_t sel_write_create(struct file *file, char *buf, size_t size) { char *scon = NULL, *tcon = NULL; char *namebuf = NULL, *objname = NULL; u32 ssid, tsid, newsid; u16 tclass; ssize_t length; char *newcon = NULL; u32 len; int nargs; length = task_has_security(current, SECURITY__COMPUTE_CREATE); if (length) goto out; length = -ENOMEM; scon = kzalloc(size + 1, 
GFP_KERNEL); if (!scon) goto out; length = -ENOMEM; tcon = kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; length = -ENOMEM; namebuf = kzalloc(size + 1, GFP_KERNEL); if (!namebuf) goto out; length = -EINVAL; nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf); if (nargs < 3 || nargs > 4) goto out; if (nargs == 4) { /* * If and when the name of new object to be queried contains * either whitespace or multibyte characters, they shall be * encoded based on the percentage-encoding rule. * If not encoded, the sscanf logic picks up only left-half * of the supplied name; splitted by a whitespace unexpectedly. */ char *r, *w; int c1, c2; r = w = namebuf; do { c1 = *r++; if (c1 == '+') c1 = ' '; else if (c1 == '%') { c1 = hex_to_bin(*r++); if (c1 < 0) goto out; c2 = hex_to_bin(*r++); if (c2 < 0) goto out; c1 = (c1 << 4) | c2; } *w++ = c1; } while (c1 != '\0'); objname = namebuf; } length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); if (length) goto out; length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); if (length) goto out; length = security_transition_sid_user(ssid, tsid, tclass, objname, &newsid); if (length) goto out; length = security_sid_to_context(newsid, &newcon, &len); if (length) goto out; length = -ERANGE; if (len > SIMPLE_TRANSACTION_LIMIT) { printk(KERN_ERR "SELinux: %s: context size (%u) exceeds " "payload max\n", __func__, len); goto out; } memcpy(buf, newcon, len); length = len; out: kfree(newcon); kfree(namebuf); kfree(tcon); kfree(scon); return length; } static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size) { char *scon = NULL, *tcon = NULL; u32 ssid, tsid, newsid; u16 tclass; ssize_t length; char *newcon = NULL; u32 len; length = task_has_security(current, SECURITY__COMPUTE_RELABEL); if (length) goto out; length = -ENOMEM; scon = kzalloc(size + 1, GFP_KERNEL); if (!scon) goto out; length = -ENOMEM; tcon = kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; length = -EINVAL; if (sscanf(buf, 
"%s %s %hu", scon, tcon, &tclass) != 3) goto out; length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); if (length) goto out; length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); if (length) goto out; length = security_change_sid(ssid, tsid, tclass, &newsid); if (length) goto out; length = security_sid_to_context(newsid, &newcon, &len); if (length) goto out; length = -ERANGE; if (len > SIMPLE_TRANSACTION_LIMIT) goto out; memcpy(buf, newcon, len); length = len; out: kfree(newcon); kfree(tcon); kfree(scon); return length; } static ssize_t sel_write_user(struct file *file, char *buf, size_t size) { char *con = NULL, *user = NULL, *ptr; u32 sid, *sids = NULL; ssize_t length; char *newcon; int i, rc; u32 len, nsids; length = task_has_security(current, SECURITY__COMPUTE_USER); if (length) goto out; length = -ENOMEM; con = kzalloc(size + 1, GFP_KERNEL); if (!con) goto out; length = -ENOMEM; user = kzalloc(size + 1, GFP_KERNEL); if (!user) goto out; length = -EINVAL; if (sscanf(buf, "%s %s", con, user) != 2) goto out; length = security_context_to_sid(con, strlen(con) + 1, &sid); if (length) goto out; length = security_get_user_sids(sid, user, &sids, &nsids); if (length) goto out; length = sprintf(buf, "%u", nsids) + 1; ptr = buf + length; for (i = 0; i < nsids; i++) { rc = security_sid_to_context(sids[i], &newcon, &len); if (rc) { length = rc; goto out; } if ((length + len) >= SIMPLE_TRANSACTION_LIMIT) { kfree(newcon); length = -ERANGE; goto out; } memcpy(ptr, newcon, len); kfree(newcon); ptr += len; length += len; } out: kfree(sids); kfree(user); kfree(con); return length; } static ssize_t sel_write_member(struct file *file, char *buf, size_t size) { char *scon = NULL, *tcon = NULL; u32 ssid, tsid, newsid; u16 tclass; ssize_t length; char *newcon = NULL; u32 len; length = task_has_security(current, SECURITY__COMPUTE_MEMBER); if (length) goto out; length = -ENOMEM; scon = kzalloc(size + 1, GFP_KERNEL); if (!scon) goto out; length = -ENOMEM; tcon = 
kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; length = -EINVAL; if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) goto out; length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); if (length) goto out; length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); if (length) goto out; length = security_member_sid(ssid, tsid, tclass, &newsid); if (length) goto out; length = security_sid_to_context(newsid, &newcon, &len); if (length) goto out; length = -ERANGE; if (len > SIMPLE_TRANSACTION_LIMIT) { printk(KERN_ERR "SELinux: %s: context size (%u) exceeds " "payload max\n", __func__, len); goto out; } memcpy(buf, newcon, len); length = len; out: kfree(newcon); kfree(tcon); kfree(scon); return length; } static struct inode *sel_make_inode(struct super_block *sb, int mode) { struct inode *ret = new_inode(sb); if (ret) { ret->i_mode = mode; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; } return ret; } static ssize_t sel_read_bool(struct file *filep, char __user *buf, size_t count, loff_t *ppos) { char *page = NULL; ssize_t length; ssize_t ret; int cur_enforcing; unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK; const char *name = filep->f_path.dentry->d_name.name; mutex_lock(&sel_mutex); ret = -EINVAL; if (index >= bool_num || strcmp(name, bool_pending_names[index])) goto out; ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; cur_enforcing = security_get_bool_value(index); if (cur_enforcing < 0) { ret = cur_enforcing; goto out; } length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing, bool_pending_values[index]); ret = simple_read_from_buffer(buf, count, ppos, page, length); out: mutex_unlock(&sel_mutex); free_page((unsigned long)page); return ret; } static ssize_t sel_write_bool(struct file *filep, const char __user *buf, size_t count, loff_t *ppos) { char *page = NULL; ssize_t length; int new_value; unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK; const char *name = 
filep->f_path.dentry->d_name.name; mutex_lock(&sel_mutex); length = task_has_security(current, SECURITY__SETBOOL); if (length) goto out; length = -EINVAL; if (index >= bool_num || strcmp(name, bool_pending_names[index])) goto out; length = -ENOMEM; if (count >= PAGE_SIZE) goto out; /* No partial writes. */ length = -EINVAL; if (*ppos != 0) goto out; length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; if (new_value) new_value = 1; bool_pending_values[index] = new_value; length = count; out: mutex_unlock(&sel_mutex); free_page((unsigned long) page); return length; } static const struct file_operations sel_bool_ops = { .read = sel_read_bool, .write = sel_write_bool, .llseek = generic_file_llseek, }; static ssize_t sel_commit_bools_write(struct file *filep, const char __user *buf, size_t count, loff_t *ppos) { char *page = NULL; ssize_t length; int new_value; mutex_lock(&sel_mutex); length = task_has_security(current, SECURITY__SETBOOL); if (length) goto out; length = -ENOMEM; if (count >= PAGE_SIZE) goto out; /* No partial writes. 
*/ length = -EINVAL; if (*ppos != 0) goto out; length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; length = -EINVAL; if (sscanf(page, "%d", &new_value) != 1) goto out; length = 0; if (new_value && bool_pending_values) length = security_set_bools(bool_num, bool_pending_values); if (!length) length = count; out: mutex_unlock(&sel_mutex); free_page((unsigned long) page); return length; } static const struct file_operations sel_commit_bools_ops = { .write = sel_commit_bools_write, .llseek = generic_file_llseek, }; static void sel_remove_entries(struct dentry *de) { struct list_head *node; spin_lock(&de->d_lock); node = de->d_subdirs.next; while (node != &de->d_subdirs) { struct dentry *d = list_entry(node, struct dentry, d_u.d_child); spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); list_del_init(node); if (d->d_inode) { dget_dlock(d); spin_unlock(&de->d_lock); spin_unlock(&d->d_lock); d_delete(d); simple_unlink(de->d_inode, d); dput(d); spin_lock(&de->d_lock); } else spin_unlock(&d->d_lock); node = de->d_subdirs.next; } spin_unlock(&de->d_lock); } #define BOOL_DIR_NAME "booleans" static int sel_make_bools(void) { int i, ret; ssize_t len; struct dentry *dentry = NULL; struct dentry *dir = bool_dir; struct inode *inode = NULL; struct inode_security_struct *isec; char **names = NULL, *page; int num; int *values = NULL; u32 sid; /* remove any existing files */ for (i = 0; i < bool_num; i++) kfree(bool_pending_names[i]); kfree(bool_pending_names); kfree(bool_pending_values); bool_num = 0; bool_pending_names = NULL; bool_pending_values = NULL; sel_remove_entries(dir); ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; ret = security_get_bools(&num, &names, &values); if (ret) goto out; for (i = 0; i < num; i++) { ret = -ENOMEM; dentry = d_alloc_name(dir, names[i]); if (!dentry) goto out; ret = -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG | 
S_IRUGO | S_IWUSR); if (!inode) goto out; ret = -ENAMETOOLONG; len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]); if (len >= PAGE_SIZE) goto out; isec = (struct inode_security_struct *)inode->i_security; ret = security_genfs_sid("selinuxfs", page, SECCLASS_FILE, &sid); if (ret) goto out; isec->sid = sid; isec->initialized = 1; inode->i_fop = &sel_bool_ops; inode->i_ino = i|SEL_BOOL_INO_OFFSET; d_add(dentry, inode); } bool_num = num; bool_pending_names = names; bool_pending_values = values; free_page((unsigned long)page); return 0; out: free_page((unsigned long)page); if (names) { for (i = 0; i < num; i++) kfree(names[i]); kfree(names); } kfree(values); sel_remove_entries(dir); return ret; } #define NULL_FILE_NAME "null" struct path selinux_null; static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char tmpbuf[TMPBUFLEN]; ssize_t length; length = scnprintf(tmpbuf, TMPBUFLEN, "%u", avc_cache_threshold); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static ssize_t sel_write_avc_cache_threshold(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *page = NULL; ssize_t ret; int new_value; ret = task_has_security(current, SECURITY__SETSECPARAM); if (ret) goto out; ret = -ENOMEM; if (count >= PAGE_SIZE) goto out; /* No partial writes. 
*/ ret = -EINVAL; if (*ppos != 0) goto out; ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) goto out; ret = -EFAULT; if (copy_from_user(page, buf, count)) goto out; ret = -EINVAL; if (sscanf(page, "%u", &new_value) != 1) goto out; avc_cache_threshold = new_value; ret = count; out: free_page((unsigned long)page); return ret; } static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char *page; ssize_t length; page = (char *)__get_free_page(GFP_KERNEL); if (!page) return -ENOMEM; length = avc_get_hash_stats(page); if (length >= 0) length = simple_read_from_buffer(buf, count, ppos, page, length); free_page((unsigned long)page); return length; } static const struct file_operations sel_avc_cache_threshold_ops = { .read = sel_read_avc_cache_threshold, .write = sel_write_avc_cache_threshold, .llseek = generic_file_llseek, }; static const struct file_operations sel_avc_hash_stats_ops = { .read = sel_read_avc_hash_stats, .llseek = generic_file_llseek, }; #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) { int cpu; for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *idx = cpu + 1; return &per_cpu(avc_cache_stats, cpu); } return NULL; } static void *sel_avc_stats_seq_start(struct seq_file *seq, loff_t *pos) { loff_t n = *pos - 1; if (*pos == 0) return SEQ_START_TOKEN; return sel_avc_get_stat_idx(&n); } static void *sel_avc_stats_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return sel_avc_get_stat_idx(pos); } static int sel_avc_stats_seq_show(struct seq_file *seq, void *v) { struct avc_cache_stats *st = v; if (v == SEQ_START_TOKEN) seq_printf(seq, "lookups hits misses allocations reclaims " "frees\n"); else { unsigned int lookups = st->lookups; unsigned int misses = st->misses; unsigned int hits = lookups - misses; seq_printf(seq, "%u %u %u %u %u %u\n", lookups, hits, misses, st->allocations, st->reclaims, 
st->frees); } return 0; } static void sel_avc_stats_seq_stop(struct seq_file *seq, void *v) { } static const struct seq_operations sel_avc_cache_stats_seq_ops = { .start = sel_avc_stats_seq_start, .next = sel_avc_stats_seq_next, .show = sel_avc_stats_seq_show, .stop = sel_avc_stats_seq_stop, }; static int sel_open_avc_cache_stats(struct inode *inode, struct file *file) { return seq_open(file, &sel_avc_cache_stats_seq_ops); } static const struct file_operations sel_avc_cache_stats_ops = { .open = sel_open_avc_cache_stats, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif static int sel_make_avc_files(struct dentry *dir) { int i; static struct tree_descr files[] = { { "cache_threshold", &sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR }, { "hash_stats", &sel_avc_hash_stats_ops, S_IRUGO }, #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS { "cache_stats", &sel_avc_cache_stats_ops, S_IRUGO }, #endif }; for (i = 0; i < ARRAY_SIZE(files); i++) { struct inode *inode; struct dentry *dentry; dentry = d_alloc_name(dir, files[i].name); if (!dentry) return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode); if (!inode) return -ENOMEM; inode->i_fop = files[i].ops; inode->i_ino = ++sel_last_ino; d_add(dentry, inode); } return 0; } static ssize_t sel_read_initcon(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *con; u32 sid, len; ssize_t ret; sid = file_inode(file)->i_ino&SEL_INO_MASK; ret = security_sid_to_context(sid, &con, &len); if (ret) return ret; ret = simple_read_from_buffer(buf, count, ppos, con, len); kfree(con); return ret; } static const struct file_operations sel_initcon_ops = { .read = sel_read_initcon, .llseek = generic_file_llseek, }; static int sel_make_initcon_files(struct dentry *dir) { int i; for (i = 1; i <= SECINITSID_NUM; i++) { struct inode *inode; struct dentry *dentry; dentry = d_alloc_name(dir, security_get_initial_sid_context(i)); if (!dentry) return -ENOMEM; inode = sel_make_inode(dir->d_sb, 
S_IFREG|S_IRUGO); if (!inode) return -ENOMEM; inode->i_fop = &sel_initcon_ops; inode->i_ino = i|SEL_INITCON_INO_OFFSET; d_add(dentry, inode); } return 0; } static inline unsigned long sel_class_to_ino(u16 class) { return (class * (SEL_VEC_MAX + 1)) | SEL_CLASS_INO_OFFSET; } static inline u16 sel_ino_to_class(unsigned long ino) { return (ino & SEL_INO_MASK) / (SEL_VEC_MAX + 1); } static inline unsigned long sel_perm_to_ino(u16 class, u32 perm) { return (class * (SEL_VEC_MAX + 1) + perm) | SEL_CLASS_INO_OFFSET; } static inline u32 sel_ino_to_perm(unsigned long ino) { return (ino & SEL_INO_MASK) % (SEL_VEC_MAX + 1); } static ssize_t sel_read_class(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long ino = file_inode(file)->i_ino; char res[TMPBUFLEN]; ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino)); return simple_read_from_buffer(buf, count, ppos, res, len); } static const struct file_operations sel_class_ops = { .read = sel_read_class, .llseek = generic_file_llseek, }; static ssize_t sel_read_perm(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long ino = file_inode(file)->i_ino; char res[TMPBUFLEN]; ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino)); return simple_read_from_buffer(buf, count, ppos, res, len); } static const struct file_operations sel_perm_ops = { .read = sel_read_perm, .llseek = generic_file_llseek, }; static ssize_t sel_read_policycap(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int value; char tmpbuf[TMPBUFLEN]; ssize_t length; unsigned long i_ino = file_inode(file)->i_ino; value = security_policycap_supported(i_ino & SEL_INO_MASK); length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static const struct file_operations sel_policycap_ops = { .read = sel_read_policycap, .llseek = generic_file_llseek, }; static int sel_make_perm_files(char *objclass, int classvalue, 
struct dentry *dir) { int i, rc, nperms; char **perms; rc = security_get_permissions(objclass, &perms, &nperms); if (rc) return rc; for (i = 0; i < nperms; i++) { struct inode *inode; struct dentry *dentry; rc = -ENOMEM; dentry = d_alloc_name(dir, perms[i]); if (!dentry) goto out; rc = -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); if (!inode) goto out; inode->i_fop = &sel_perm_ops; /* i+1 since perm values are 1-indexed */ inode->i_ino = sel_perm_to_ino(classvalue, i + 1); d_add(dentry, inode); } rc = 0; out: for (i = 0; i < nperms; i++) kfree(perms[i]); kfree(perms); return rc; } static int sel_make_class_dir_entries(char *classname, int index, struct dentry *dir) { struct dentry *dentry = NULL; struct inode *inode = NULL; int rc; dentry = d_alloc_name(dir, "index"); if (!dentry) return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); if (!inode) return -ENOMEM; inode->i_fop = &sel_class_ops; inode->i_ino = sel_class_to_ino(index); d_add(dentry, inode); dentry = sel_make_dir(dir, "perms", &last_class_ino); if (IS_ERR(dentry)) return PTR_ERR(dentry); rc = sel_make_perm_files(classname, index, dentry); return rc; } static void sel_remove_classes(void) { struct list_head *class_node; list_for_each(class_node, &class_dir->d_subdirs) { struct dentry *class_subdir = list_entry(class_node, struct dentry, d_u.d_child); struct list_head *class_subdir_node; list_for_each(class_subdir_node, &class_subdir->d_subdirs) { struct dentry *d = list_entry(class_subdir_node, struct dentry, d_u.d_child); if (d->d_inode) if (d->d_inode->i_mode & S_IFDIR) sel_remove_entries(d); } sel_remove_entries(class_subdir); } sel_remove_entries(class_dir); } static int sel_make_classes(void) { int rc, nclasses, i; char **classes; /* delete any existing entries */ sel_remove_classes(); rc = security_get_classes(&classes, &nclasses); if (rc) return rc; /* +2 since classes are 1-indexed */ last_class_ino = sel_class_to_ino(nclasses + 2); for (i = 0; i < nclasses; i++) { 
struct dentry *class_name_dir; class_name_dir = sel_make_dir(class_dir, classes[i], &last_class_ino); if (IS_ERR(class_name_dir)) { rc = PTR_ERR(class_name_dir); goto out; } /* i+1 since class values are 1-indexed */ rc = sel_make_class_dir_entries(classes[i], i + 1, class_name_dir); if (rc) goto out; } rc = 0; out: for (i = 0; i < nclasses; i++) kfree(classes[i]); kfree(classes); return rc; } static int sel_make_policycap(void) { unsigned int iter; struct dentry *dentry = NULL; struct inode *inode = NULL; sel_remove_entries(policycap_dir); for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) { if (iter < ARRAY_SIZE(policycap_names)) dentry = d_alloc_name(policycap_dir, policycap_names[iter]); else dentry = d_alloc_name(policycap_dir, "unknown"); if (dentry == NULL) return -ENOMEM; inode = sel_make_inode(policycap_dir->d_sb, S_IFREG | S_IRUGO); if (inode == NULL) return -ENOMEM; inode->i_fop = &sel_policycap_ops; inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET; d_add(dentry, inode); } return 0; } static struct dentry *sel_make_dir(struct dentry *dir, const char *name, unsigned long *ino) { struct dentry *dentry = d_alloc_name(dir, name); struct inode *inode; if (!dentry) return ERR_PTR(-ENOMEM); inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) { dput(dentry); return ERR_PTR(-ENOMEM); } inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; inode->i_ino = ++(*ino); /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ inc_nlink(inode); d_add(dentry, inode); /* bump link count on parent directory, too */ inc_nlink(dir->d_inode); return dentry; } static int sel_fill_super(struct super_block *sb, void *data, int silent) { int ret; struct dentry *dentry; struct inode *inode; struct inode_security_struct *isec; static struct tree_descr selinux_files[] = { [SEL_LOAD] = {"load", &sel_load_ops, S_IRUSR|S_IWUSR}, [SEL_ENFORCE] = {"enforce", &sel_enforce_ops, S_IRUGO|S_IWUSR}, [SEL_CONTEXT] = {"context", &transaction_ops, S_IRUGO|S_IWUGO}, [SEL_ACCESS] = {"access", &transaction_ops, S_IRUGO|S_IWUGO}, [SEL_CREATE] = {"create", &transaction_ops, S_IRUGO|S_IWUGO}, [SEL_RELABEL] = {"relabel", &transaction_ops, S_IRUGO|S_IWUGO}, [SEL_USER] = {"user", &transaction_ops, S_IRUGO|S_IWUGO}, [SEL_POLICYVERS] = {"policyvers", &sel_policyvers_ops, S_IRUGO}, [SEL_COMMIT_BOOLS] = {"commit_pending_bools", &sel_commit_bools_ops, S_IWUSR}, [SEL_MLS] = {"mls", &sel_mls_ops, S_IRUGO}, [SEL_DISABLE] = {"disable", &sel_disable_ops, S_IWUSR}, [SEL_MEMBER] = {"member", &transaction_ops, S_IRUGO|S_IWUGO}, [SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR}, [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO}, [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO}, [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO}, [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO}, /* last one */ {""} }; ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files); if (ret) goto err; bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino); if (IS_ERR(bool_dir)) { ret = PTR_ERR(bool_dir); bool_dir = NULL; goto err; } ret = -ENOMEM; dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME); if (!dentry) goto err; ret = -ENOMEM; inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO); if (!inode) goto err; inode->i_ino = ++sel_last_ino; isec = (struct inode_security_struct *)inode->i_security; isec->sid = SECINITSID_DEVNULL; isec->sclass = 
SECCLASS_CHR_FILE; isec->initialized = 1; init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3)); d_add(dentry, inode); selinux_null.dentry = dentry; dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto err; } ret = sel_make_avc_files(dentry); if (ret) goto err; dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto err; } ret = sel_make_initcon_files(dentry); if (ret) goto err; class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino); if (IS_ERR(class_dir)) { ret = PTR_ERR(class_dir); class_dir = NULL; goto err; } policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino); if (IS_ERR(policycap_dir)) { ret = PTR_ERR(policycap_dir); policycap_dir = NULL; goto err; } return 0; err: printk(KERN_ERR "SELinux: %s: failed while creating inodes\n", __func__); return ret; } static struct dentry *sel_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, sel_fill_super); } static struct file_system_type sel_fs_type = { .name = "selinuxfs", .mount = sel_mount, .kill_sb = kill_litter_super, }; struct vfsmount *selinuxfs_mount; static struct kobject *selinuxfs_kobj; static int __init init_sel_fs(void) { int err; if (!selinux_enabled) return 0; selinuxfs_kobj = kobject_create_and_add("selinux", fs_kobj); if (!selinuxfs_kobj) return -ENOMEM; err = register_filesystem(&sel_fs_type); if (err) { kobject_put(selinuxfs_kobj); return err; } selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type); if (IS_ERR(selinuxfs_mount)) { printk(KERN_ERR "selinuxfs: could not mount!\n"); err = PTR_ERR(selinuxfs_mount); selinuxfs_mount = NULL; } return err; } __initcall(init_sel_fs); #ifdef CONFIG_SECURITY_SELINUX_DISABLE void exit_sel_fs(void) { kobject_put(selinuxfs_kobj); kern_unmount(selinuxfs_mount); unregister_filesystem(&sel_fs_type); } #endif
gpl-2.0
friendlyarm/linux-4.x.y
arch/mips/alchemy/common/platform.c
1853
12223
/* * Platform device support for Au1x00 SoCs. * * Copyright 2004, Matt Porter <mporter@kernel.crashing.org> * * (C) Copyright Embedded Alley Solutions, Inc 2005 * Author: Pantelis Antoniou <pantelis@embeddedalley.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/slab.h> #include <linux/usb/ehci_pdriver.h> #include <linux/usb/ohci_pdriver.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_dbdma.h> #include <asm/mach-au1x00/au1100_mmc.h> #include <asm/mach-au1x00/au1xxx_eth.h> #include <prom.h> static void alchemy_8250_pm(struct uart_port *port, unsigned int state, unsigned int old_state) { #ifdef CONFIG_SERIAL_8250 switch (state) { case 0: alchemy_uart_enable(CPHYSADDR(port->membase)); serial8250_do_pm(port, state, old_state); break; case 3: /* power off */ serial8250_do_pm(port, state, old_state); alchemy_uart_disable(CPHYSADDR(port->membase)); break; default: serial8250_do_pm(port, state, old_state); break; } #endif } #define PORT(_base, _irq) \ { \ .mapbase = _base, \ .irq = _irq, \ .regshift = 2, \ .iotype = UPIO_AU, \ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \ UPF_FIXED_TYPE, \ .type = PORT_16550A, \ .pm = alchemy_8250_pm, \ } static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { [ALCHEMY_CPU_AU1000] = { PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT), PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT), }, [ALCHEMY_CPU_AU1500] = { PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT), }, [ALCHEMY_CPU_AU1100] = { PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT), 
PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT), }, [ALCHEMY_CPU_AU1550] = { PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT), PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT), }, [ALCHEMY_CPU_AU1200] = { PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT), PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT), }, [ALCHEMY_CPU_AU1300] = { PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT), PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT), PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT), PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT), }, }; static struct platform_device au1xx0_uart_device = { .name = "serial8250", .id = PLAT8250_DEV_AU1X00, }; static void __init alchemy_setup_uarts(int ctype) { long uartclk; int s = sizeof(struct plat_serial8250_port); int c = alchemy_get_uarts(ctype); struct plat_serial8250_port *ports; struct clk *clk = clk_get(NULL, ALCHEMY_PERIPH_CLK); if (IS_ERR(clk)) return; if (clk_prepare_enable(clk)) { clk_put(clk); return; } uartclk = clk_get_rate(clk); clk_put(clk); ports = kzalloc(s * (c + 1), GFP_KERNEL); if (!ports) { printk(KERN_INFO "Alchemy: no memory for UART data\n"); return; } memcpy(ports, au1x00_uart_data[ctype], s * c); au1xx0_uart_device.dev.platform_data = ports; /* Fill up uartclk. 
*/ for (s = 0; s < c; s++) ports[s].uartclk = uartclk; if (platform_device_register(&au1xx0_uart_device)) printk(KERN_INFO "Alchemy: failed to register UARTs\n"); } /* The dmamask must be set for OHCI/EHCI to work */ static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32); static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32); /* Power on callback for the ehci platform driver */ static int alchemy_ehci_power_on(struct platform_device *pdev) { return alchemy_usb_control(ALCHEMY_USB_EHCI0, 1); } /* Power off/suspend callback for the ehci platform driver */ static void alchemy_ehci_power_off(struct platform_device *pdev) { alchemy_usb_control(ALCHEMY_USB_EHCI0, 0); } static struct usb_ehci_pdata alchemy_ehci_pdata = { .no_io_watchdog = 1, .power_on = alchemy_ehci_power_on, .power_off = alchemy_ehci_power_off, .power_suspend = alchemy_ehci_power_off, }; /* Power on callback for the ohci platform driver */ static int alchemy_ohci_power_on(struct platform_device *pdev) { int unit; unit = (pdev->id == 1) ? ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0; return alchemy_usb_control(unit, 1); } /* Power off/suspend callback for the ohci platform driver */ static void alchemy_ohci_power_off(struct platform_device *pdev) { int unit; unit = (pdev->id == 1) ? 
ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0; alchemy_usb_control(unit, 0); } static struct usb_ohci_pdata alchemy_ohci_pdata = { .power_on = alchemy_ohci_power_on, .power_off = alchemy_ohci_power_off, .power_suspend = alchemy_ohci_power_off, }; static unsigned long alchemy_ohci_data[][2] __initdata = { [ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT }, [ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT }, [ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT }, [ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT }, [ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT }, [ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT }, }; static unsigned long alchemy_ehci_data[][2] __initdata = { [ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT }, [ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT }, }; static int __init _new_usbres(struct resource **r, struct platform_device **d) { *r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); if (!*r) return -ENOMEM; *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL); if (!*d) { kfree(*r); return -ENOMEM; } (*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32); (*d)->num_resources = 2; (*d)->resource = *r; return 0; } static void __init alchemy_setup_usb(int ctype) { struct resource *res; struct platform_device *pdev; /* setup OHCI0. 
Every variant has one */ if (_new_usbres(&res, &pdev)) return; res[0].start = alchemy_ohci_data[ctype][0]; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = alchemy_ohci_data[ctype][1]; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ohci-platform"; pdev->id = 0; pdev->dev.dma_mask = &alchemy_ohci_dmamask; pdev->dev.platform_data = &alchemy_ohci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n"); /* setup EHCI0: Au1200/Au1300 */ if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) { if (_new_usbres(&res, &pdev)) return; res[0].start = alchemy_ehci_data[ctype][0]; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = alchemy_ehci_data[ctype][1]; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ehci-platform"; pdev->id = 0; pdev->dev.dma_mask = &alchemy_ehci_dmamask; pdev->dev.platform_data = &alchemy_ehci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n"); } /* Au1300: OHCI1 */ if (ctype == ALCHEMY_CPU_AU1300) { if (_new_usbres(&res, &pdev)) return; res[0].start = AU1300_USB_OHCI1_PHYS_ADDR; res[0].end = res[0].start + 0x100 - 1; res[0].flags = IORESOURCE_MEM; res[1].start = AU1300_USB_INT; res[1].end = res[1].start; res[1].flags = IORESOURCE_IRQ; pdev->name = "ohci-platform"; pdev->id = 1; pdev->dev.dma_mask = &alchemy_ohci_dmamask; pdev->dev.platform_data = &alchemy_ohci_pdata; if (platform_device_register(pdev)) printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n"); } } /* Macro to help defining the Ethernet MAC resources */ #define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */ #define MAC_RES(_base, _enable, _irq, _macdma) \ { \ .start = _base, \ .end = _base + 0xffff, \ .flags = IORESOURCE_MEM, \ }, \ { \ .start = _enable, \ .end = _enable + 0x3, \ .flags = IORESOURCE_MEM, \ }, \ { \ .start = _irq, \ .end = _irq, \ .flags = 
IORESOURCE_IRQ \ }, \ { \ .start = _macdma, \ .end = _macdma + 0x1ff, \ .flags = IORESOURCE_MEM, \ } static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { [ALCHEMY_CPU_AU1000] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1000_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1500] = { MAC_RES(AU1500_MAC0_PHYS_ADDR, AU1500_MACEN_PHYS_ADDR, AU1500_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1100] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1100_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, [ALCHEMY_CPU_AU1550] = { MAC_RES(AU1000_MAC0_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR, AU1550_MAC0_DMA_INT, AU1000_MACDMA0_PHYS_ADDR) }, }; static struct au1000_eth_platform_data au1xxx_eth0_platform_data = { .phy1_search_mac0 = 1, }; static struct platform_device au1xxx_eth0_device = { .name = "au1000-eth", .id = 0, .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth0_platform_data, }; static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = { [ALCHEMY_CPU_AU1000] = { MAC_RES(AU1000_MAC1_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR + 4, AU1000_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, [ALCHEMY_CPU_AU1500] = { MAC_RES(AU1500_MAC1_PHYS_ADDR, AU1500_MACEN_PHYS_ADDR + 4, AU1500_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, [ALCHEMY_CPU_AU1550] = { MAC_RES(AU1000_MAC1_PHYS_ADDR, AU1000_MACEN_PHYS_ADDR + 4, AU1550_MAC1_DMA_INT, AU1000_MACDMA1_PHYS_ADDR) }, }; static struct au1000_eth_platform_data au1xxx_eth1_platform_data = { .phy1_search_mac0 = 1, }; static struct platform_device au1xxx_eth1_device = { .name = "au1000-eth", .id = 1, .num_resources = MAC_RES_COUNT, .dev.platform_data = &au1xxx_eth1_platform_data, }; void __init au1xxx_override_eth_cfg(unsigned int port, struct au1000_eth_platform_data *eth_data) { if (!eth_data || port > 1) return; if (port == 0) memcpy(&au1xxx_eth0_platform_data, eth_data, sizeof(struct au1000_eth_platform_data)); else memcpy(&au1xxx_eth1_platform_data, eth_data, 
sizeof(struct au1000_eth_platform_data)); } static void __init alchemy_setup_macs(int ctype) { int ret, i; unsigned char ethaddr[6]; struct resource *macres; /* Handle 1st MAC */ if (alchemy_get_macs(ctype) < 1) return; macres = kmemdup(au1xxx_eth0_resources[ctype], sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); if (!macres) { printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n"); return; } au1xxx_eth0_device.resource = macres; i = prom_get_ethernet_addr(ethaddr); if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac)) memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6); ret = platform_device_register(&au1xxx_eth0_device); if (ret) printk(KERN_INFO "Alchemy: failed to register MAC0\n"); /* Handle 2nd MAC */ if (alchemy_get_macs(ctype) < 2) return; macres = kmemdup(au1xxx_eth1_resources[ctype], sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL); if (!macres) { printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n"); return; } au1xxx_eth1_device.resource = macres; ethaddr[5] += 1; /* next addr for 2nd MAC */ if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac)) memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6); /* Register second MAC if enabled in pinfunc */ if (!(alchemy_rdsys(AU1000_SYS_PINFUNC) & SYS_PF_NI2)) { ret = platform_device_register(&au1xxx_eth1_device); if (ret) printk(KERN_INFO "Alchemy: failed to register MAC1\n"); } } static int __init au1xxx_platform_init(void) { int ctype = alchemy_get_cputype(); alchemy_setup_uarts(ctype); alchemy_setup_macs(ctype); alchemy_setup_usb(ctype); return 0; } arch_initcall(au1xxx_platform_init);
gpl-2.0
bestmjh47/ActiveKernel_M250S-JB
net/atm/lec.c
2365
64855
/* * lec.c: Lan Emulation driver * * Marko Kiiskila <mkiiskila@yahoo.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/capability.h> /* We are ethernet device */ #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <net/sock.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include <net/arp.h> #include <net/dst.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/seq_file.h> /* TokenRing if needed */ #ifdef CONFIG_TR #include <linux/trdevice.h> #endif /* And atm device */ #include <linux/atmdev.h> #include <linux/atmlec.h> /* Proxy LEC knows about bridging */ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) #include "../bridge/br_private.h" static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 }; #endif /* Modular too */ #include <linux/module.h> #include <linux/init.h> #include "lec.h" #include "lec_arpc.h" #include "resources.h" #define DUMP_PACKETS 0 /* * 0 = None, * 1 = 30 first bytes * 2 = Whole packet */ #define LEC_UNRES_QUE_LEN 8 /* * number of tx packets to queue for a * single destination while waiting for SVC */ static int lec_open(struct net_device *dev); static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct net_device *dev); static int lec_close(struct net_device *dev); static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, const unsigned char *mac_addr); static int lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove); /* LANE2 functions */ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_address, const u8 *tlvs, u32 sizeoftlvs); static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force, u8 **tlvs, u32 *sizeoftlvs); static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, const u8 *tlvs, u32 sizeoftlvs); 
static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long permanent); static void lec_arp_check_empties(struct lec_priv *priv, struct atm_vcc *vcc, struct sk_buff *skb); static void lec_arp_destroy(struct lec_priv *priv); static void lec_arp_init(struct lec_priv *priv); static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, const unsigned char *mac_to_find, int is_rdesc, struct lec_arp_table **ret_entry); static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, const unsigned char *atm_addr, unsigned long remoteflag, unsigned int targetless_le_arp); static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); static void lec_set_flush_tran_id(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long tran_id); static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, struct atm_vcc *vcc, void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb)); static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc); /* must be done under lec_arp_lock */ static inline void lec_arp_hold(struct lec_arp_table *entry) { atomic_inc(&entry->usage); } static inline void lec_arp_put(struct lec_arp_table *entry) { if (atomic_dec_and_test(&entry->usage)) kfree(entry); } static struct lane2_ops lane2_ops = { lane2_resolve, /* resolve, spec 3.1.3 */ lane2_associate_req, /* associate_req, spec 3.1.4 */ NULL /* associate indicator, spec 3.1.5 */ }; static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* Device structures */ static struct net_device *dev_lec[MAX_LEC_ITF]; #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) { char *buff; struct lec_priv *priv; /* * Check if this is a BPDU. 
If so, ask zeppelin to send * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit * as the Config BPDU has */ buff = skb->data + skb->dev->hard_header_len; if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) { struct sock *sk; struct sk_buff *skb2; struct atmlec_msg *mesg; skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); if (skb2 == NULL) return; skb2->len = sizeof(struct atmlec_msg); mesg = (struct atmlec_msg *)skb2->data; mesg->type = l_topology_change; buff += 4; mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */ priv = netdev_priv(dev); atm_force_charge(priv->lecd, skb2->truesize); sk = sk_atm(priv->lecd); skb_queue_tail(&sk->sk_receive_queue, skb2); sk->sk_data_ready(sk, skb2->len); } } #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ /* * Modelled after tr_type_trans * All multicast and ARE or STE frames go to BUS. * Non source routed frames go by destination address. * Last hop source routed frames go by destination address. * Not last hop source routed frames go by _next_ route descriptor. * Returns pointer to destination MAC address or fills in rdesc * and returns NULL. 
*/ #ifdef CONFIG_TR static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc) { struct trh_hdr *trh; unsigned int riflen, num_rdsc; trh = (struct trh_hdr *)packet; if (trh->daddr[0] & (uint8_t) 0x80) return bus_mac; /* multicast */ if (trh->saddr[0] & TR_RII) { riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; if ((ntohs(trh->rcf) >> 13) != 0) return bus_mac; /* ARE or STE */ } else return trh->daddr; /* not source routed */ if (riflen < 6) return trh->daddr; /* last hop, source routed */ /* riflen is 6 or more, packet has more than one route descriptor */ num_rdsc = (riflen / 2) - 1; memset(rdesc, 0, ETH_ALEN); /* offset 4 comes from LAN destination field in LE control frames */ if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT)) memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16)); else { memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16)); rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0)); } return NULL; } #endif /* CONFIG_TR */ /* * Open/initialize the netdevice. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is non-reboot way to recover if something goes wrong. 
*/ static int lec_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static void lec_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_device *dev = skb->dev; ATM_SKB(skb)->vcc = vcc; ATM_SKB(skb)->atm_options = vcc->atm_options; atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); if (vcc->send(vcc, skb) < 0) { dev->stats.tx_dropped++; return; } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } static void lec_tx_timeout(struct net_device *dev) { pr_info("%s\n", dev->name); dev->trans_start = jiffies; netif_wake_queue(dev); } static netdev_tx_t lec_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *skb2; struct lec_priv *priv = netdev_priv(dev); struct lecdatahdr_8023 *lec_h; struct atm_vcc *vcc; struct lec_arp_table *entry; unsigned char *dst; int min_frame_size; #ifdef CONFIG_TR unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ #endif int is_rdesc; pr_debug("called\n"); if (!priv->lecd) { pr_info("%s:No lecd attached\n", dev->name); dev->stats.tx_errors++; netif_stop_queue(dev); kfree_skb(skb); return NETDEV_TX_OK; } pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n", (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), (long)skb_end_pointer(skb)); #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) lec_handle_bridge(skb, dev); #endif /* Make sure we have room for lec_id */ if (skb_headroom(skb) < 2) { pr_debug("reallocating skb\n"); skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); kfree_skb(skb); if (skb2 == NULL) return NETDEV_TX_OK; skb = skb2; } skb_push(skb, 2); /* Put le header to place, works for TokenRing too */ lec_h = (struct lecdatahdr_8023 *)skb->data; lec_h->le_header = htons(priv->lecid); #ifdef CONFIG_TR /* * Ugly. Use this to realign Token Ring packets for * e.g. PCA-200E driver. 
*/ if (priv->is_trdev) { skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); kfree_skb(skb); if (skb2 == NULL) return NETDEV_TX_OK; skb = skb2; } #endif #if DUMP_PACKETS >= 2 #define MAX_DUMP_SKB 99 #elif DUMP_PACKETS >= 1 #define MAX_DUMP_SKB 30 #endif #if DUMP_PACKETS >= 1 printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n", dev->name, skb->len, priv->lecid); print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1, skb->data, min(skb->len, MAX_DUMP_SKB), true); #endif /* DUMP_PACKETS >= 1 */ /* Minimum ethernet-frame size */ #ifdef CONFIG_TR if (priv->is_trdev) min_frame_size = LEC_MINIMUM_8025_SIZE; else #endif min_frame_size = LEC_MINIMUM_8023_SIZE; if (skb->len < min_frame_size) { if ((skb->len + skb_tailroom(skb)) < min_frame_size) { skb2 = skb_copy_expand(skb, 0, min_frame_size - skb->truesize, GFP_ATOMIC); dev_kfree_skb(skb); if (skb2 == NULL) { dev->stats.tx_dropped++; return NETDEV_TX_OK; } skb = skb2; } skb_put(skb, min_frame_size - skb->len); } /* Send to right vcc */ is_rdesc = 0; dst = lec_h->h_dest; #ifdef CONFIG_TR if (priv->is_trdev) { dst = get_tr_dst(skb->data + 2, rdesc); if (dst == NULL) { dst = rdesc; is_rdesc = 1; } } #endif entry = NULL; vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name, vcc, vcc ? 
vcc->flags : 0, entry); if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) { if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { pr_debug("%s:queuing packet, MAC address %pM\n", dev->name, lec_h->h_dest); skb_queue_tail(&entry->tx_wait, skb); } else { pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n", dev->name, lec_h->h_dest); dev->stats.tx_dropped++; dev_kfree_skb(skb); } goto out; } #if DUMP_PACKETS > 0 printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci); #endif /* DUMP_PACKETS > 0 */ while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest); lec_send(vcc, skb2); } lec_send(vcc, skb); if (!atm_may_send(vcc, 0)) { struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); vpriv->xoff = 1; netif_stop_queue(dev); /* * vcc->pop() might have occurred in between, making * the vcc usuable again. Since xmit is serialized, * this is the only situation we have to re-test. */ if (atm_may_send(vcc, 0)) netif_wake_queue(dev); } out: if (entry) lec_arp_put(entry); dev->trans_start = jiffies; return NETDEV_TX_OK; } /* The inverse routine to net_open(). 
*/ static int lec_close(struct net_device *dev) { netif_stop_queue(dev); return 0; } static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; struct net_device *dev = (struct net_device *)vcc->proto_data; struct lec_priv *priv = netdev_priv(dev); struct atmlec_msg *mesg; struct lec_arp_table *entry; int i; char *tmp; /* FIXME */ atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); mesg = (struct atmlec_msg *)skb->data; tmp = skb->data; tmp += sizeof(struct atmlec_msg); pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type); switch (mesg->type) { case l_set_mac_addr: for (i = 0; i < 6; i++) dev->dev_addr[i] = mesg->content.normal.mac_addr[i]; break; case l_del_mac_addr: for (i = 0; i < 6; i++) dev->dev_addr[i] = 0; break; case l_addr_delete: lec_addr_delete(priv, mesg->content.normal.atm_addr, mesg->content.normal.flag); break; case l_topology_change: priv->topology_change = mesg->content.normal.flag; break; case l_flush_complete: lec_flush_complete(priv, mesg->content.normal.flag); break; case l_narp_req: /* LANE2: see 7.1.35 in the lane2 spec */ spin_lock_irqsave(&priv->lec_arp_lock, flags); entry = lec_arp_find(priv, mesg->content.normal.mac_addr); lec_arp_remove(priv, entry); spin_unlock_irqrestore(&priv->lec_arp_lock, flags); if (mesg->content.normal.no_source_le_narp) break; /* FALL THROUGH */ case l_arp_update: lec_arp_update(priv, mesg->content.normal.mac_addr, mesg->content.normal.atm_addr, mesg->content.normal.flag, mesg->content.normal.targetless_le_arp); pr_debug("in l_arp_update\n"); if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */ pr_debug("LANE2 3.1.5, got tlvs, size %d\n", mesg->sizeoftlvs); lane2_associate_ind(dev, mesg->content.normal.mac_addr, tmp, mesg->sizeoftlvs); } break; case l_config: priv->maximum_unknown_frame_count = mesg->content.config.maximum_unknown_frame_count; priv->max_unknown_frame_time = (mesg->content.config.max_unknown_frame_time * HZ); priv->max_retry_count = 
mesg->content.config.max_retry_count; priv->aging_time = (mesg->content.config.aging_time * HZ); priv->forward_delay_time = (mesg->content.config.forward_delay_time * HZ); priv->arp_response_time = (mesg->content.config.arp_response_time * HZ); priv->flush_timeout = (mesg->content.config.flush_timeout * HZ); priv->path_switching_delay = (mesg->content.config.path_switching_delay * HZ); priv->lane_version = mesg->content.config.lane_version; /* LANE2 */ priv->lane2_ops = NULL; if (priv->lane_version > 1) priv->lane2_ops = &lane2_ops; if (dev_set_mtu(dev, mesg->content.config.mtu)) pr_info("%s: change_mtu to %d failed\n", dev->name, mesg->content.config.mtu); priv->is_proxy = mesg->content.config.is_proxy; break; case l_flush_tran_id: lec_set_flush_tran_id(priv, mesg->content.normal.atm_addr, mesg->content.normal.flag); break; case l_set_lecid: priv->lecid = (unsigned short)(0xffff & mesg->content.normal.flag); break; case l_should_bridge: #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) { pr_debug("%s: bridge zeppelin asks about %pM\n", dev->name, mesg->content.proxy.mac_addr); if (br_fdb_test_addr_hook == NULL) break; if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) { /* hit from bridge table, send LE_ARP_RESPONSE */ struct sk_buff *skb2; struct sock *sk; pr_debug("%s: entry found, responding to zeppelin\n", dev->name); skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); if (skb2 == NULL) break; skb2->len = sizeof(struct atmlec_msg); skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg)); atm_force_charge(priv->lecd, skb2->truesize); sk = sk_atm(priv->lecd); skb_queue_tail(&sk->sk_receive_queue, skb2); sk->sk_data_ready(sk, skb2->len); } } #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ break; default: pr_info("%s: Unknown message type %d\n", dev->name, mesg->type); dev_kfree_skb(skb); return -EINVAL; } dev_kfree_skb(skb); return 0; } static void lec_atm_close(struct atm_vcc *vcc) { struct sk_buff *skb; struct 
net_device *dev = (struct net_device *)vcc->proto_data; struct lec_priv *priv = netdev_priv(dev); priv->lecd = NULL; /* Do something needful? */ netif_stop_queue(dev); lec_arp_destroy(priv); if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) pr_info("%s closing with messages pending\n", dev->name); while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) { atm_return(vcc, skb->truesize); dev_kfree_skb(skb); } pr_info("%s: Shut down!\n", dev->name); module_put(THIS_MODULE); } static struct atmdev_ops lecdev_ops = { .close = lec_atm_close, .send = lec_atm_send }; static struct atm_dev lecatm_dev = { .ops = &lecdev_ops, .type = "lec", .number = 999, /* dummy device number */ .lock = __SPIN_LOCK_UNLOCKED(lecatm_dev.lock) }; /* * LANE2: new argument struct sk_buff *data contains * the LE_ARP based TLVs introduced in the LANE2 spec */ static int send_to_lecd(struct lec_priv *priv, atmlec_msg_type type, const unsigned char *mac_addr, const unsigned char *atm_addr, struct sk_buff *data) { struct sock *sk; struct sk_buff *skb; struct atmlec_msg *mesg; if (!priv || !priv->lecd) return -1; skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); if (!skb) return -1; skb->len = sizeof(struct atmlec_msg); mesg = (struct atmlec_msg *)skb->data; memset(mesg, 0, sizeof(struct atmlec_msg)); mesg->type = type; if (data != NULL) mesg->sizeoftlvs = data->len; if (mac_addr) memcpy(&mesg->content.normal.mac_addr, mac_addr, ETH_ALEN); else mesg->content.normal.targetless_le_arp = 1; if (atm_addr) memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN); atm_force_charge(priv->lecd, skb->truesize); sk = sk_atm(priv->lecd); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); if (data != NULL) { pr_debug("about to send %d bytes of data\n", data->len); atm_force_charge(priv->lecd, data->truesize); skb_queue_tail(&sk->sk_receive_queue, data); sk->sk_data_ready(sk, skb->len); } return 0; } /* shamelessly stolen from drivers/net/net_init.c */ static int 
lec_change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < 68) || (new_mtu > 18190)) return -EINVAL; dev->mtu = new_mtu; return 0; } static void lec_set_multicast_list(struct net_device *dev) { /* * by default, all multicast frames arrive over the bus. * eventually support selective multicast service */ } static const struct net_device_ops lec_netdev_ops = { .ndo_open = lec_open, .ndo_stop = lec_close, .ndo_start_xmit = lec_start_xmit, .ndo_change_mtu = lec_change_mtu, .ndo_tx_timeout = lec_tx_timeout, .ndo_set_multicast_list = lec_set_multicast_list, }; static const unsigned char lec_ctrl_magic[] = { 0xff, 0x00, 0x01, 0x01 }; #define LEC_DATA_DIRECT_8023 2 #define LEC_DATA_DIRECT_8025 3 static int lec_is_data_direct(struct atm_vcc *vcc) { return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) || (vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025)); } static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; struct net_device *dev = (struct net_device *)vcc->proto_data; struct lec_priv *priv = netdev_priv(dev); #if DUMP_PACKETS > 0 printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci); #endif if (!skb) { pr_debug("%s: null skb\n", dev->name); lec_vcc_close(priv, vcc); return; } #if DUMP_PACKETS >= 2 #define MAX_SKB_DUMP 99 #elif DUMP_PACKETS >= 1 #define MAX_SKB_DUMP 30 #endif #if DUMP_PACKETS > 0 printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n", dev->name, skb->len, priv->lecid); print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1, skb->data, min(MAX_SKB_DUMP, skb->len), true); #endif /* DUMP_PACKETS > 0 */ if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */ struct sock *sk = sk_atm(vcc); pr_debug("%s: To daemon\n", dev->name); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); } else { /* Data frame, queue to protocol handlers */ struct lec_arp_table *entry; unsigned char *src, *dst; atm_return(vcc, skb->truesize); if 
(*(__be16 *) skb->data == htons(priv->lecid) || !priv->lecd || !(dev->flags & IFF_UP)) { /* * Probably looping back, or if lecd is missing, * lecd has gone down */ pr_debug("Ignoring frame...\n"); dev_kfree_skb(skb); return; } #ifdef CONFIG_TR if (priv->is_trdev) dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest; else #endif dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; /* * If this is a Data Direct VCC, and the VCC does not match * the LE_ARP cache entry, delete the LE_ARP cache entry. */ spin_lock_irqsave(&priv->lec_arp_lock, flags); if (lec_is_data_direct(vcc)) { #ifdef CONFIG_TR if (priv->is_trdev) src = ((struct lecdatahdr_8025 *)skb->data)-> h_source; else #endif src = ((struct lecdatahdr_8023 *)skb->data)-> h_source; entry = lec_arp_find(priv, src); if (entry && entry->vcc != vcc) { lec_arp_remove(priv, entry); lec_arp_put(entry); } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); if (!(dst[0] & 0x01) && /* Never filter Multi/Broadcast */ !priv->is_proxy && /* Proxy wants all the packets */ memcmp(dst, dev->dev_addr, dev->addr_len)) { dev_kfree_skb(skb); return; } if (!hlist_empty(&priv->lec_arp_empty_ones)) lec_arp_check_empties(priv, vcc, skb); skb_pull(skb, 2); /* skip lec_id */ #ifdef CONFIG_TR if (priv->is_trdev) skb->protocol = tr_type_trans(skb, dev); else #endif skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(skb); } } static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb) { struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); struct net_device *dev = skb->dev; if (vpriv == NULL) { pr_info("vpriv = NULL!?!?!?\n"); return; } vpriv->old_pop(vcc, skb); if (vpriv->xoff && atm_may_send(vcc, 0)) { vpriv->xoff = 0; if (netif_running(dev) && netif_queue_stopped(dev)) netif_wake_queue(dev); } } static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg) { struct lec_vcc_priv *vpriv; int bytes_left; struct atmlec_ioc ioc_data; 
/* Lecd must be up in this case */ bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); if (bytes_left != 0) pr_info("copy from user failed for %d bytes\n", bytes_left); if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || !dev_lec[ioc_data.dev_num]) return -EINVAL; vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); if (!vpriv) return -ENOMEM; vpriv->xoff = 0; vpriv->old_pop = vcc->pop; vcc->user_back = vpriv; vcc->pop = lec_pop; lec_vcc_added(netdev_priv(dev_lec[ioc_data.dev_num]), &ioc_data, vcc, vcc->push); vcc->proto_data = dev_lec[ioc_data.dev_num]; vcc->push = lec_push; return 0; } static int lec_mcast_attach(struct atm_vcc *vcc, int arg) { if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg]) return -EINVAL; vcc->proto_data = dev_lec[arg]; return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc); } /* Initialize device. */ static int lecd_attach(struct atm_vcc *vcc, int arg) { int i; struct lec_priv *priv; if (arg < 0) i = 0; else i = arg; #ifdef CONFIG_TR if (arg >= MAX_LEC_ITF) return -EINVAL; #else /* Reserve the top NUM_TR_DEVS for TR */ if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS)) return -EINVAL; #endif if (!dev_lec[i]) { int is_trdev, size; is_trdev = 0; if (i >= (MAX_LEC_ITF - NUM_TR_DEVS)) is_trdev = 1; size = sizeof(struct lec_priv); #ifdef CONFIG_TR if (is_trdev) dev_lec[i] = alloc_trdev(size); else #endif dev_lec[i] = alloc_etherdev(size); if (!dev_lec[i]) return -ENOMEM; dev_lec[i]->netdev_ops = &lec_netdev_ops; snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i); if (register_netdev(dev_lec[i])) { free_netdev(dev_lec[i]); return -EINVAL; } priv = netdev_priv(dev_lec[i]); priv->is_trdev = is_trdev; } else { priv = netdev_priv(dev_lec[i]); if (priv->lecd) return -EADDRINUSE; } lec_arp_init(priv); priv->itfnum = i; /* LANE2 addition */ priv->lecd = vcc; vcc->dev = &lecatm_dev; vcc_insert_socket(sk_atm(vcc)); vcc->proto_data = dev_lec[i]; set_bit(ATM_VF_META, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); /* Set default 
values to these variables */ priv->maximum_unknown_frame_count = 1; priv->max_unknown_frame_time = (1 * HZ); priv->vcc_timeout_period = (1200 * HZ); priv->max_retry_count = 1; priv->aging_time = (300 * HZ); priv->forward_delay_time = (15 * HZ); priv->topology_change = 0; priv->arp_response_time = (1 * HZ); priv->flush_timeout = (4 * HZ); priv->path_switching_delay = (6 * HZ); if (dev_lec[i]->flags & IFF_UP) netif_start_queue(dev_lec[i]); __module_get(THIS_MODULE); return i; } #ifdef CONFIG_PROC_FS static const char *lec_arp_get_status_string(unsigned char status) { static const char *const lec_arp_status_string[] = { "ESI_UNKNOWN ", "ESI_ARP_PENDING ", "ESI_VC_PENDING ", "<Undefined> ", "ESI_FLUSH_PENDING ", "ESI_FORWARD_DIRECT" }; if (status > ESI_FORWARD_DIRECT) status = 3; /* ESI_UNDEFINED */ return lec_arp_status_string[status]; } static void lec_info(struct seq_file *seq, struct lec_arp_table *entry) { int i; for (i = 0; i < ETH_ALEN; i++) seq_printf(seq, "%2.2x", entry->mac_addr[i] & 0xff); seq_printf(seq, " "); for (i = 0; i < ATM_ESA_LEN; i++) seq_printf(seq, "%2.2x", entry->atm_addr[i] & 0xff); seq_printf(seq, " %s %4.4x", lec_arp_get_status_string(entry->status), entry->flags & 0xffff); if (entry->vcc) seq_printf(seq, "%3d %3d ", entry->vcc->vpi, entry->vcc->vci); else seq_printf(seq, " "); if (entry->recv_vcc) { seq_printf(seq, " %3d %3d", entry->recv_vcc->vpi, entry->recv_vcc->vci); } seq_putc(seq, '\n'); } struct lec_state { unsigned long flags; struct lec_priv *locked; struct hlist_node *node; struct net_device *dev; int itf; int arp_table; int misc_table; }; static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl, loff_t *l) { struct hlist_node *e = state->node; struct lec_arp_table *tmp; if (!e) e = tbl->first; if (e == SEQ_START_TOKEN) { e = tbl->first; --*l; } hlist_for_each_entry_from(tmp, e, next) { if (--*l < 0) break; } state->node = e; return (*l < 0) ? 
state : NULL; } static void *lec_arp_walk(struct lec_state *state, loff_t *l, struct lec_priv *priv) { void *v = NULL; int p; for (p = state->arp_table; p < LEC_ARP_TABLE_SIZE; p++) { v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l); if (v) break; } state->arp_table = p; return v; } static void *lec_misc_walk(struct lec_state *state, loff_t *l, struct lec_priv *priv) { struct hlist_head *lec_misc_tables[] = { &priv->lec_arp_empty_ones, &priv->lec_no_forward, &priv->mcast_fwds }; void *v = NULL; int q; for (q = state->misc_table; q < ARRAY_SIZE(lec_misc_tables); q++) { v = lec_tbl_walk(state, lec_misc_tables[q], l); if (v) break; } state->misc_table = q; return v; } static void *lec_priv_walk(struct lec_state *state, loff_t *l, struct lec_priv *priv) { if (!state->locked) { state->locked = priv; spin_lock_irqsave(&priv->lec_arp_lock, state->flags); } if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) { spin_unlock_irqrestore(&priv->lec_arp_lock, state->flags); state->locked = NULL; /* Partial state reset for the next time we get called */ state->arp_table = state->misc_table = 0; } return state->locked; } static void *lec_itf_walk(struct lec_state *state, loff_t *l) { struct net_device *dev; void *v; dev = state->dev ? state->dev : dev_lec[state->itf]; v = (dev && netdev_priv(dev)) ? lec_priv_walk(state, l, netdev_priv(dev)) : NULL; if (!v && dev) { dev_put(dev); /* Partial state reset for the next time we get called */ dev = NULL; } state->dev = dev; return v; } static void *lec_get_idx(struct lec_state *state, loff_t l) { void *v = NULL; for (; state->itf < MAX_LEC_ITF; state->itf++) { v = lec_itf_walk(state, &l); if (v) break; } return v; } static void *lec_seq_start(struct seq_file *seq, loff_t *pos) { struct lec_state *state = seq->private; state->itf = 0; state->dev = NULL; state->locked = NULL; state->arp_table = 0; state->misc_table = 0; state->node = SEQ_START_TOKEN; return *pos ? 
lec_get_idx(state, *pos) : SEQ_START_TOKEN; } static void lec_seq_stop(struct seq_file *seq, void *v) { struct lec_state *state = seq->private; if (state->dev) { spin_unlock_irqrestore(&state->locked->lec_arp_lock, state->flags); dev_put(state->dev); } } static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct lec_state *state = seq->private; v = lec_get_idx(state, 1); *pos += !!PTR_ERR(v); return v; } static int lec_seq_show(struct seq_file *seq, void *v) { static const char lec_banner[] = "Itf MAC ATM destination" " Status Flags " "VPI/VCI Recv VPI/VCI\n"; if (v == SEQ_START_TOKEN) seq_puts(seq, lec_banner); else { struct lec_state *state = seq->private; struct net_device *dev = state->dev; struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next); seq_printf(seq, "%s ", dev->name); lec_info(seq, entry); } return 0; } static const struct seq_operations lec_seq_ops = { .start = lec_seq_start, .next = lec_seq_next, .stop = lec_seq_stop, .show = lec_seq_show, }; static int lec_seq_open(struct inode *inode, struct file *file) { return seq_open_private(file, &lec_seq_ops, sizeof(struct lec_state)); } static const struct file_operations lec_seq_fops = { .owner = THIS_MODULE, .open = lec_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct atm_vcc *vcc = ATM_SD(sock); int err = 0; switch (cmd) { case ATMLEC_CTRL: case ATMLEC_MCAST: case ATMLEC_DATA: if (!capable(CAP_NET_ADMIN)) return -EPERM; break; default: return -ENOIOCTLCMD; } switch (cmd) { case ATMLEC_CTRL: err = lecd_attach(vcc, (int)arg); if (err >= 0) sock->state = SS_CONNECTED; break; case ATMLEC_MCAST: err = lec_mcast_attach(vcc, (int)arg); break; case ATMLEC_DATA: err = lec_vcc_attach(vcc, (void __user *)arg); break; } return err; } static struct atm_ioctl lane_ioctl_ops = { .owner = THIS_MODULE, .ioctl = lane_ioctl, }; static int __init 
lane_module_init(void) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *p; p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); if (!p) { pr_err("Unable to initialize /proc/net/atm/lec\n"); return -ENOMEM; } #endif register_atm_ioctl(&lane_ioctl_ops); pr_info("lec.c: initialized\n"); return 0; } static void __exit lane_module_cleanup(void) { int i; remove_proc_entry("lec", atm_proc_root); deregister_atm_ioctl(&lane_ioctl_ops); for (i = 0; i < MAX_LEC_ITF; i++) { if (dev_lec[i] != NULL) { unregister_netdev(dev_lec[i]); free_netdev(dev_lec[i]); dev_lec[i] = NULL; } } } module_init(lane_module_init); module_exit(lane_module_cleanup); /* * LANE2: 3.1.3, LE_RESOLVE.request * Non force allocates memory and fills in *tlvs, fills in *sizeoftlvs. * If sizeoftlvs == NULL the default TLVs associated with with this * lec will be used. * If dst_mac == NULL, targetless LE_ARP will be sent */ static int lane2_resolve(struct net_device *dev, const u8 *dst_mac, int force, u8 **tlvs, u32 *sizeoftlvs) { unsigned long flags; struct lec_priv *priv = netdev_priv(dev); struct lec_arp_table *table; struct sk_buff *skb; int retval; if (force == 0) { spin_lock_irqsave(&priv->lec_arp_lock, flags); table = lec_arp_find(priv, dst_mac); spin_unlock_irqrestore(&priv->lec_arp_lock, flags); if (table == NULL) return -1; *tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC); if (*tlvs == NULL) return -1; *sizeoftlvs = table->sizeoftlvs; return 0; } if (sizeoftlvs == NULL) retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, NULL); else { skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC); if (skb == NULL) return -1; skb->len = *sizeoftlvs; skb_copy_to_linear_data(skb, *tlvs, *sizeoftlvs); retval = send_to_lecd(priv, l_arp_xmt, dst_mac, NULL, skb); } return retval; } /* * LANE2: 3.1.4, LE_ASSOCIATE.request * Associate the *tlvs with the *lan_dst address. 
* Will overwrite any previous association * Returns 1 for success, 0 for failure (out of memory) * */ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst, const u8 *tlvs, u32 sizeoftlvs) { int retval; struct sk_buff *skb; struct lec_priv *priv = netdev_priv(dev); if (compare_ether_addr(lan_dst, dev->dev_addr)) return 0; /* not our mac address */ kfree(priv->tlvs); /* NULL if there was no previous association */ priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); if (priv->tlvs == NULL) return 0; priv->sizeoftlvs = sizeoftlvs; skb = alloc_skb(sizeoftlvs, GFP_ATOMIC); if (skb == NULL) return 0; skb->len = sizeoftlvs; skb_copy_to_linear_data(skb, tlvs, sizeoftlvs); retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); if (retval != 0) pr_info("lec.c: lane2_associate_req() failed\n"); /* * If the previous association has changed we must * somehow notify other LANE entities about the change */ return 1; } /* * LANE2: 3.1.5, LE_ASSOCIATE.indication * */ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr, const u8 *tlvs, u32 sizeoftlvs) { #if 0 int i = 0; #endif struct lec_priv *priv = netdev_priv(dev); #if 0 /* * Why have the TLVs in LE_ARP entries * since we do not use them? 
When you * uncomment this code, make sure the * TLVs get freed when entry is killed */ struct lec_arp_table *entry = lec_arp_find(priv, mac_addr); if (entry == NULL) return; /* should not happen */ kfree(entry->tlvs); entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); if (entry->tlvs == NULL) return; entry->sizeoftlvs = sizeoftlvs; #endif #if 0 pr_info("\n"); pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs); while (i < sizeoftlvs) pr_cont("%02x ", tlvs[i++]); pr_cont("\n"); #endif /* tell MPOA about the TLVs we saw */ if (priv->lane2_ops && priv->lane2_ops->associate_indicator) { priv->lane2_ops->associate_indicator(dev, mac_addr, tlvs, sizeoftlvs); } } /* * Here starts what used to lec_arpc.c * * lec_arpc.c was added here when making * lane client modular. October 1997 */ #include <linux/types.h> #include <linux/timer.h> #include <linux/param.h> #include <asm/atomic.h> #include <linux/inetdevice.h> #include <net/route.h> #if 0 #define pr_debug(format, args...) /* #define pr_debug printk */ #endif #define DEBUG_ARP_TABLE 0 #define LEC_ARP_REFRESH_INTERVAL (3*HZ) static void lec_arp_check_expire(struct work_struct *work); static void lec_arp_expire_arp(unsigned long data); /* * Arp table funcs */ #define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1)) /* * Initialization of arp-cache */ static void lec_arp_init(struct lec_priv *priv) { unsigned short i; for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); INIT_HLIST_HEAD(&priv->lec_no_forward); INIT_HLIST_HEAD(&priv->mcast_fwds); spin_lock_init(&priv->lec_arp_lock); INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire); schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); } static void lec_arp_clear_vccs(struct lec_arp_table *entry) { if (entry->vcc) { struct atm_vcc *vcc = entry->vcc; struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); struct net_device *dev = (struct net_device *)vcc->proto_data; vcc->pop = 
vpriv->old_pop; if (vpriv->xoff) netif_wake_queue(dev); kfree(vpriv); vcc->user_back = NULL; vcc->push = entry->old_push; vcc_release_async(vcc, -EPIPE); entry->vcc = NULL; } if (entry->recv_vcc) { entry->recv_vcc->push = entry->old_recv_push; vcc_release_async(entry->recv_vcc, -EPIPE); entry->recv_vcc = NULL; } } /* * Insert entry to lec_arp_table * LANE2: Add to the end of the list to satisfy 8.1.13 */ static inline void lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry) { struct hlist_head *tmp; tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; hlist_add_head(&entry->next, tmp); pr_debug("Added entry:%pM\n", entry->mac_addr); } /* * Remove entry from lec_arp_table */ static int lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove) { struct hlist_node *node; struct lec_arp_table *entry; int i, remove_vcc = 1; if (!to_remove) return -1; hlist_del(&to_remove->next); del_timer(&to_remove->timer); /* * If this is the only MAC connected to this VCC, * also tear down the VCC */ if (to_remove->status >= ESI_FLUSH_PENDING) { /* * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT */ for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (memcmp(to_remove->atm_addr, entry->atm_addr, ATM_ESA_LEN) == 0) { remove_vcc = 0; break; } } } if (remove_vcc) lec_arp_clear_vccs(to_remove); } skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? 
*/ pr_debug("Removed entry:%pM\n", to_remove->mac_addr); return 0; } #if DEBUG_ARP_TABLE static const char *get_status_string(unsigned char st) { switch (st) { case ESI_UNKNOWN: return "ESI_UNKNOWN"; case ESI_ARP_PENDING: return "ESI_ARP_PENDING"; case ESI_VC_PENDING: return "ESI_VC_PENDING"; case ESI_FLUSH_PENDING: return "ESI_FLUSH_PENDING"; case ESI_FORWARD_DIRECT: return "ESI_FORWARD_DIRECT"; } return "<UNKNOWN>"; } static void dump_arp_table(struct lec_priv *priv) { struct hlist_node *node; struct lec_arp_table *rulla; char buf[256]; int i, j, offset; pr_info("Dump %p:\n", priv); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) { offset = 0; offset += sprintf(buf, "%d: %p\n", i, rulla); offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc-> vpi : 0, rulla->recv_vcc ? rulla->recv_vcc-> vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s\n", buf); } } if (!hlist_empty(&priv->lec_no_forward)) pr_info("No forward\n"); hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { offset = 0; offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? 
rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc->vpi : 0, rulla->recv_vcc ? rulla->recv_vcc->vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s\n", buf); } if (!hlist_empty(&priv->lec_arp_empty_ones)) pr_info("Empty ones\n"); hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { offset = 0; offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc->vpi : 0, rulla->recv_vcc ? rulla->recv_vcc->vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s", buf); } if (!hlist_empty(&priv->mcast_fwds)) pr_info("Multicast Forward VCCs\n"); hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { offset = 0; offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr); offset += sprintf(buf + offset, " Atm:"); for (j = 0; j < ATM_ESA_LEN; j++) { offset += sprintf(buf + offset, "%2.2x ", rulla->atm_addr[j] & 0xff); } offset += sprintf(buf + offset, "Vcc vpi:%d vci:%d, Recv_vcc vpi:%d vci:%d Last_used:%lx, Timestamp:%lx, No_tries:%d ", rulla->vcc ? rulla->vcc->vpi : 0, rulla->vcc ? rulla->vcc->vci : 0, rulla->recv_vcc ? rulla->recv_vcc->vpi : 0, rulla->recv_vcc ? 
rulla->recv_vcc->vci : 0, rulla->last_used, rulla->timestamp, rulla->no_tries); offset += sprintf(buf + offset, "Flags:%x, Packets_flooded:%x, Status: %s ", rulla->flags, rulla->packets_flooded, get_status_string(rulla->status)); pr_info("%s\n", buf); } } #else #define dump_arp_table(priv) do { } while (0) #endif /* * Destruction of arp-cache */ static void lec_arp_destroy(struct lec_priv *priv) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; cancel_delayed_work_sync(&priv->lec_arp_work); /* * Remove all entries */ spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { lec_arp_remove(priv, entry); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); } hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { del_timer_sync(&entry->timer); lec_arp_clear_vccs(entry); hlist_del(&entry->next); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { del_timer_sync(&entry->timer); lec_arp_clear_vccs(entry); hlist_del(&entry->next); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->lec_no_forward); hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ lec_arp_clear_vccs(entry); hlist_del(&entry->next); lec_arp_put(entry); } INIT_HLIST_HEAD(&priv->mcast_fwds); priv->mcast_vcc = NULL; spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } /* * Find entry by mac_address */ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, const unsigned char *mac_addr) { struct hlist_node *node; struct hlist_head *head; struct lec_arp_table *entry; pr_debug("%pM\n", mac_addr); head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; hlist_for_each_entry(entry, node, head, next) { if (!compare_ether_addr(mac_addr, entry->mac_addr)) return entry; } 
return NULL; } static struct lec_arp_table *make_entry(struct lec_priv *priv, const unsigned char *mac_addr) { struct lec_arp_table *to_return; to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); if (!to_return) { pr_info("LEC: Arp entry kmalloc failed\n"); return NULL; } memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); INIT_HLIST_NODE(&to_return->next); setup_timer(&to_return->timer, lec_arp_expire_arp, (unsigned long)to_return); to_return->last_used = jiffies; to_return->priv = priv; skb_queue_head_init(&to_return->tx_wait); atomic_set(&to_return->usage, 1); return to_return; } /* Arp sent timer expired */ static void lec_arp_expire_arp(unsigned long data) { struct lec_arp_table *entry; entry = (struct lec_arp_table *)data; pr_debug("\n"); if (entry->status == ESI_ARP_PENDING) { if (entry->no_tries <= entry->priv->max_retry_count) { if (entry->is_rdesc) send_to_lecd(entry->priv, l_rdesc_arp_xmt, entry->mac_addr, NULL, NULL); else send_to_lecd(entry->priv, l_arp_xmt, entry->mac_addr, NULL, NULL); entry->no_tries++; } mod_timer(&entry->timer, jiffies + (1 * HZ)); } } /* Unknown/unused vcc expire, remove associated entry */ static void lec_arp_expire_vcc(unsigned long data) { unsigned long flags; struct lec_arp_table *to_remove = (struct lec_arp_table *)data; struct lec_priv *priv = (struct lec_priv *)to_remove->priv; del_timer(&to_remove->timer); pr_debug("%p %p: vpi:%d vci:%d\n", to_remove, priv, to_remove->vcc ? to_remove->recv_vcc->vpi : 0, to_remove->vcc ? 
to_remove->recv_vcc->vci : 0); spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_del(&to_remove->next); spin_unlock_irqrestore(&priv->lec_arp_lock, flags); lec_arp_clear_vccs(to_remove); lec_arp_put(to_remove); } static bool __lec_arp_check_expire(struct lec_arp_table *entry, unsigned long now, struct lec_priv *priv) { unsigned long time_to_check; if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change) time_to_check = priv->forward_delay_time; else time_to_check = priv->aging_time; pr_debug("About to expire: %lx - %lx > %lx\n", now, entry->last_used, time_to_check); if (time_after(now, entry->last_used + time_to_check) && !(entry->flags & LEC_PERMANENT_FLAG) && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ /* Remove entry */ pr_debug("Entry timed out\n"); lec_arp_remove(priv, entry); lec_arp_put(entry); } else { /* Something else */ if ((entry->status == ESI_VC_PENDING || entry->status == ESI_ARP_PENDING) && time_after_eq(now, entry->timestamp + priv->max_unknown_frame_time)) { entry->timestamp = jiffies; entry->packets_flooded = 0; if (entry->status == ESI_VC_PENDING) send_to_lecd(priv, l_svc_setup, entry->mac_addr, entry->atm_addr, NULL); } if (entry->status == ESI_FLUSH_PENDING && time_after_eq(now, entry->timestamp + priv->path_switching_delay)) { lec_arp_hold(entry); return true; } } return false; } /* * Expire entries. * 1. Re-set timer * 2. For each entry, delete entries that have aged past the age limit. * 3. For each entry, depending on the status of the entry, perform * the following maintenance. * a. If status is ESI_VC_PENDING or ESI_ARP_PENDING then if the * tick_count is above the max_unknown_frame_time, clear * the tick_count to zero and clear the packets_flooded counter * to zero. This supports the packet rate limit per address * while flooding unknowns. * b. If the status is ESI_FLUSH_PENDING and the tick_count is greater * than or equal to the path_switching_delay, change the status * to ESI_FORWARD_DIRECT. 
This causes the flush period to end * regardless of the progress of the flush protocol. */ static void lec_arp_check_expire(struct work_struct *work) { unsigned long flags; struct lec_priv *priv = container_of(work, struct lec_priv, lec_arp_work.work); struct hlist_node *node, *next; struct lec_arp_table *entry; unsigned long now; int i; pr_debug("%p\n", priv); now = jiffies; restart: spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (__lec_arp_check_expire(entry, now, priv)) { struct sk_buff *skb; struct atm_vcc *vcc = entry->vcc; spin_unlock_irqrestore(&priv->lec_arp_lock, flags); while ((skb = skb_dequeue(&entry->tx_wait))) lec_send(vcc, skb); entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); goto restart; } } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); } /* * Try to find vcc where mac_address is attached. * */ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, const unsigned char *mac_to_find, int is_rdesc, struct lec_arp_table **ret_entry) { unsigned long flags; struct lec_arp_table *entry; struct atm_vcc *found; if (mac_to_find[0] & 0x01) { switch (priv->lane_version) { case 1: return priv->mcast_vcc; case 2: /* LANE2 wants arp for multicast addresses */ if (!compare_ether_addr(mac_to_find, bus_mac)) return priv->mcast_vcc; break; default: break; } } spin_lock_irqsave(&priv->lec_arp_lock, flags); entry = lec_arp_find(priv, mac_to_find); if (entry) { if (entry->status == ESI_FORWARD_DIRECT) { /* Connection Ok */ entry->last_used = jiffies; lec_arp_hold(entry); *ret_entry = entry; found = entry->vcc; goto out; } /* * If the LE_ARP cache entry is still pending, reset count to 0 * so another LE_ARP request can be made for this frame. 
*/ if (entry->status == ESI_ARP_PENDING) entry->no_tries = 0; /* * Data direct VC not yet set up, check to see if the unknown * frame count is greater than the limit. If the limit has * not been reached, allow the caller to send packet to * BUS. */ if (entry->status != ESI_FLUSH_PENDING && entry->packets_flooded < priv->maximum_unknown_frame_count) { entry->packets_flooded++; pr_debug("Flooding..\n"); found = priv->mcast_vcc; goto out; } /* * We got here because entry->status == ESI_FLUSH_PENDING * or BUS flood limit was reached for an entry which is * in ESI_ARP_PENDING or ESI_VC_PENDING state. */ lec_arp_hold(entry); *ret_entry = entry; pr_debug("entry->status %d entry->vcc %p\n", entry->status, entry->vcc); found = NULL; } else { /* No matching entry was found */ entry = make_entry(priv, mac_to_find); pr_debug("Making entry\n"); if (!entry) { found = priv->mcast_vcc; goto out; } lec_arp_add(priv, entry); /* We want arp-request(s) to be sent */ entry->packets_flooded = 1; entry->status = ESI_ARP_PENDING; entry->no_tries = 1; entry->last_used = entry->timestamp = jiffies; entry->is_rdesc = is_rdesc; if (entry->is_rdesc) send_to_lecd(priv, l_rdesc_arp_xmt, mac_to_find, NULL, NULL); else send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL); entry->timer.expires = jiffies + (1 * HZ); entry->timer.function = lec_arp_expire_arp; add_timer(&entry->timer); found = priv->mcast_vcc; } out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return found; } static int lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long permanent) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; pr_debug("\n"); spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) && (permanent || !(entry->flags & LEC_PERMANENT_FLAG))) { lec_arp_remove(priv, entry); 
lec_arp_put(entry); } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return 0; } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return -1; } /* * Notifies: Response to arp_request (atm_addr != NULL) */ static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, const unsigned char *atm_addr, unsigned long remoteflag, unsigned int targetless_le_arp) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; int i; pr_debug("%smac:%pM\n", (targetless_le_arp) ? "targetless " : "", mac_addr); spin_lock_irqsave(&priv->lec_arp_lock, flags); entry = lec_arp_find(priv, mac_addr); if (entry == NULL && targetless_le_arp) goto out; /* * LANE2: ignore targetless LE_ARPs for which * we have no entry in the cache. 7.1.30 */ if (!hlist_empty(&priv->lec_arp_empty_ones)) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { hlist_del(&entry->next); del_timer(&entry->timer); tmp = lec_arp_find(priv, mac_addr); if (tmp) { del_timer(&tmp->timer); tmp->status = ESI_FORWARD_DIRECT; memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN); tmp->vcc = entry->vcc; tmp->old_push = entry->old_push; tmp->last_used = jiffies; del_timer(&entry->timer); lec_arp_put(entry); entry = tmp; } else { entry->status = ESI_FORWARD_DIRECT; memcpy(entry->mac_addr, mac_addr, ETH_ALEN); entry->last_used = jiffies; lec_arp_add(priv, entry); } if (remoteflag) entry->flags |= LEC_REMOTE_FLAG; else entry->flags &= ~LEC_REMOTE_FLAG; pr_debug("After update\n"); dump_arp_table(priv); goto out; } } } entry = lec_arp_find(priv, mac_addr); if (!entry) { entry = make_entry(priv, mac_addr); if (!entry) goto out; entry->status = ESI_UNKNOWN; lec_arp_add(priv, entry); /* Temporary, changes before end of function */ } memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); del_timer(&entry->timer); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(tmp, node, 
&priv->lec_arp_tables[i], next) { if (entry != tmp && !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { /* Vcc to this host exists */ if (tmp->status > ESI_VC_PENDING) { /* * ESI_FLUSH_PENDING, * ESI_FORWARD_DIRECT */ entry->vcc = tmp->vcc; entry->old_push = tmp->old_push; } entry->status = tmp->status; break; } } } if (remoteflag) entry->flags |= LEC_REMOTE_FLAG; else entry->flags &= ~LEC_REMOTE_FLAG; if (entry->status == ESI_ARP_PENDING || entry->status == ESI_UNKNOWN) { entry->status = ESI_VC_PENDING; send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL); } pr_debug("After update2\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } /* * Notifies: Vcc setup ready */ static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, struct atm_vcc *vcc, void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb)) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i, found_entry = 0; spin_lock_irqsave(&priv->lec_arp_lock, flags); /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */ if (ioc_data->receive == 2) { pr_debug("LEC_ARP: Attaching mcast forward\n"); #if 0 entry = lec_arp_find(priv, bus_mac); if (!entry) { pr_info("LEC_ARP: Multicast entry not found!\n"); goto out; } memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; #endif entry = make_entry(priv, bus_mac); if (entry == NULL) goto out; del_timer(&entry->timer); memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; hlist_add_head(&entry->next, &priv->mcast_fwds); goto out; } else if (ioc_data->receive == 1) { /* * Vcc which we don't want to make default vcc, * attach it anyway. 
*/ pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], ioc_data->atm_addr[2], ioc_data->atm_addr[3], ioc_data->atm_addr[4], ioc_data->atm_addr[5], ioc_data->atm_addr[6], ioc_data->atm_addr[7], ioc_data->atm_addr[8], ioc_data->atm_addr[9], ioc_data->atm_addr[10], ioc_data->atm_addr[11], ioc_data->atm_addr[12], ioc_data->atm_addr[13], ioc_data->atm_addr[14], ioc_data->atm_addr[15], ioc_data->atm_addr[16], ioc_data->atm_addr[17], ioc_data->atm_addr[18], ioc_data->atm_addr[19]); entry = make_entry(priv, bus_mac); if (entry == NULL) goto out; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); memset(entry->mac_addr, 0, ETH_ALEN); entry->recv_vcc = vcc; entry->old_recv_push = old_push; entry->status = ESI_UNKNOWN; entry->timer.expires = jiffies + priv->vcc_timeout_period; entry->timer.function = lec_arp_expire_vcc; hlist_add_head(&entry->next, &priv->lec_no_forward); add_timer(&entry->timer); dump_arp_table(priv); goto out; } pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], ioc_data->atm_addr[2], ioc_data->atm_addr[3], ioc_data->atm_addr[4], ioc_data->atm_addr[5], ioc_data->atm_addr[6], ioc_data->atm_addr[7], ioc_data->atm_addr[8], ioc_data->atm_addr[9], ioc_data->atm_addr[10], ioc_data->atm_addr[11], ioc_data->atm_addr[12], ioc_data->atm_addr[13], ioc_data->atm_addr[14], ioc_data->atm_addr[15], ioc_data->atm_addr[16], ioc_data->atm_addr[17], ioc_data->atm_addr[18], ioc_data->atm_addr[19]); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (memcmp (ioc_data->atm_addr, entry->atm_addr, ATM_ESA_LEN) == 0) { pr_debug("LEC_ARP: Attaching data direct\n"); pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", 
entry->vcc ? entry->vcc->vci : 0, entry->recv_vcc ? entry->recv_vcc-> vci : 0); found_entry = 1; del_timer(&entry->timer); entry->vcc = vcc; entry->old_push = old_push; if (entry->status == ESI_VC_PENDING) { if (priv->maximum_unknown_frame_count == 0) entry->status = ESI_FORWARD_DIRECT; else { entry->timestamp = jiffies; entry->status = ESI_FLUSH_PENDING; #if 0 send_to_lecd(priv, l_flush_xmt, NULL, entry->atm_addr, NULL); #endif } } else { /* * They were forming a connection * to us, and we to them. Our * ATM address is numerically lower * than theirs, so we make connection * we formed into default VCC (8.1.11). * Connection they made gets torn * down. This might confuse some * clients. Can be changed if * someone reports trouble... */ ; } } } } if (found_entry) { pr_debug("After vcc was added\n"); dump_arp_table(priv); goto out; } /* * Not found, snatch address from first data packet that arrives * from this vcc */ entry = make_entry(priv, bus_mac); if (!entry) goto out; entry->vcc = vcc; entry->old_push = old_push; memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); memset(entry->mac_addr, 0, ETH_ALEN); entry->status = ESI_UNKNOWN; hlist_add_head(&entry->next, &priv->lec_arp_empty_ones); entry->timer.expires = jiffies + priv->vcc_timeout_period; entry->timer.function = lec_arp_expire_vcc; add_timer(&entry->timer); pr_debug("After vcc was added\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i; pr_debug("%lx\n", tran_id); restart: spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (entry->flush_tran_id == tran_id && entry->status == ESI_FLUSH_PENDING) { struct sk_buff *skb; struct atm_vcc *vcc = entry->vcc; lec_arp_hold(entry); 
spin_unlock_irqrestore(&priv->lec_arp_lock, flags); while ((skb = skb_dequeue(&entry->tx_wait))) lec_send(vcc, skb); entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); pr_debug("LEC_ARP: Flushed\n"); goto restart; } } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); dump_arp_table(priv); } static void lec_set_flush_tran_id(struct lec_priv *priv, const unsigned char *atm_addr, unsigned long tran_id) { unsigned long flags; struct hlist_node *node; struct lec_arp_table *entry; int i; spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { entry->flush_tran_id = tran_id; pr_debug("Set flush transaction id to %lx for %p\n", tran_id, entry); } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc) { unsigned long flags; unsigned char mac_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct lec_arp_table *to_add; struct lec_vcc_priv *vpriv; int err = 0; vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL); if (!vpriv) return -ENOMEM; vpriv->xoff = 0; vpriv->old_pop = vcc->pop; vcc->user_back = vpriv; vcc->pop = lec_pop; spin_lock_irqsave(&priv->lec_arp_lock, flags); to_add = make_entry(priv, mac_addr); if (!to_add) { vcc->pop = vpriv->old_pop; kfree(vpriv); err = -ENOMEM; goto out; } memcpy(to_add->atm_addr, vcc->remote.sas_addr.prv, ATM_ESA_LEN); to_add->status = ESI_FORWARD_DIRECT; to_add->flags |= LEC_PERMANENT_FLAG; to_add->vcc = vcc; to_add->old_push = vcc->push; vcc->push = lec_push; priv->mcast_vcc = vcc; lec_arp_add(priv, to_add); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); return err; } static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry; int i; pr_debug("LEC_ARP: lec_vcc_close vpi:%d 
vci:%d\n", vcc->vpi, vcc->vci); dump_arp_table(priv); spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { if (vcc == entry->vcc) { lec_arp_remove(priv, entry); lec_arp_put(entry); if (priv->mcast_vcc == vcc) priv->mcast_vcc = NULL; } } } hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (entry->vcc == vcc) { lec_arp_clear_vccs(entry); del_timer(&entry->timer); hlist_del(&entry->next); lec_arp_put(entry); } } hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { if (entry->recv_vcc == vcc) { lec_arp_clear_vccs(entry); del_timer(&entry->timer); hlist_del(&entry->next); lec_arp_put(entry); } } hlist_for_each_entry_safe(entry, node, next, &priv->mcast_fwds, next) { if (entry->recv_vcc == vcc) { lec_arp_clear_vccs(entry); /* No timer, LANEv2 7.1.20 and 2.3.5.3 */ hlist_del(&entry->next); lec_arp_put(entry); } } spin_unlock_irqrestore(&priv->lec_arp_lock, flags); dump_arp_table(priv); } static void lec_arp_check_empties(struct lec_priv *priv, struct atm_vcc *vcc, struct sk_buff *skb) { unsigned long flags; struct hlist_node *node, *next; struct lec_arp_table *entry, *tmp; struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data; unsigned char *src; #ifdef CONFIG_TR struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data; if (priv->is_trdev) src = tr_hdr->h_source; else #endif src = hdr->h_source; spin_lock_irqsave(&priv->lec_arp_lock, flags); hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { if (vcc == entry->vcc) { del_timer(&entry->timer); memcpy(entry->mac_addr, src, ETH_ALEN); entry->status = ESI_FORWARD_DIRECT; entry->last_used = jiffies; /* We might have got an entry */ tmp = lec_arp_find(priv, src); if (tmp) { lec_arp_remove(priv, tmp); lec_arp_put(tmp); } hlist_del(&entry->next); lec_arp_add(priv, entry); goto out; } } pr_debug("LEC_ARP: 
Arp_check_empties: entry not found!\n"); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } MODULE_LICENSE("GPL");
gpl-2.0
MatiasBjorling/linux
drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
2877
3872
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/gpuobj.h> #include <core/class.h> #include <subdev/fb.h> #include <subdev/vm/nv04.h> #include <engine/dmaobj.h> struct nv04_dmaeng_priv { struct nouveau_dmaeng base; }; static int nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, struct nouveau_object *parent, struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng); struct nouveau_gpuobj *gpuobj; u32 flags0 = nv_mclass(dmaobj); u32 flags2 = 0x00000000; u64 offset = dmaobj->start & 0xfffff000; u64 adjust = dmaobj->start & 0x00000fff; u32 length = dmaobj->limit - dmaobj->start; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { case NV03_CHANNEL_DMA_CLASS: case NV10_CHANNEL_DMA_CLASS: case NV17_CHANNEL_DMA_CLASS: case NV40_CHANNEL_DMA_CLASS: break; default: return -EINVAL; } } if (dmaobj->target == NV_MEM_TARGET_VM) { if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; if (!dmaobj->start) return nouveau_gpuobj_dup(parent, pgt, pgpuobj); offset = nv_ro32(pgt, 8 + (offset >> 10)); offset &= 0xfffff000; } dmaobj->target = NV_MEM_TARGET_PCI; dmaobj->access = NV_MEM_ACCESS_RW; } switch (dmaobj->target) { case NV_MEM_TARGET_VRAM: flags0 |= 0x00003000; break; case NV_MEM_TARGET_PCI: flags0 |= 0x00023000; break; case NV_MEM_TARGET_PCI_NOSNOOP: flags0 |= 0x00033000; break; default: return -EINVAL; } switch (dmaobj->access) { case NV_MEM_ACCESS_RO: flags0 |= 0x00004000; break; case NV_MEM_ACCESS_WO: flags0 |= 0x00008000; case NV_MEM_ACCESS_RW: flags2 |= 0x00000002; break; default: return -EINVAL; } ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj); *pgpuobj = gpuobj; if (ret == 0) { nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20)); nv_wo32(*pgpuobj, 0x04, length); nv_wo32(*pgpuobj, 0x08, flags2 | offset); nv_wo32(*pgpuobj, 0x0c, flags2 | offset); } return ret; } static int nv04_dmaeng_ctor(struct nouveau_object *parent, struct 
nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv04_dmaeng_priv *priv; int ret; ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; nv_engine(priv)->sclass = nouveau_dmaobj_sclass; priv->base.bind = nv04_dmaobj_bind; return 0; } struct nouveau_oclass nv04_dmaeng_oclass = { .handle = NV_ENGINE(DMAOBJ, 0x04), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv04_dmaeng_ctor, .dtor = _nouveau_dmaeng_dtor, .init = _nouveau_dmaeng_init, .fini = _nouveau_dmaeng_fini, }, };
gpl-2.0
kashifmin/BLU_LIFE_ONE
arch/tile/kernel/setup.c
2877
44723
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mmzone.h> #include <linux/bootmem.h> #include <linux/module.h> #include <linux/node.h> #include <linux/cpu.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/kexec.h> #include <linux/pci.h> #include <linux/initrd.h> #include <linux/io.h> #include <linux/highmem.h> #include <linux/smp.h> #include <linux/timex.h> #include <asm/setup.h> #include <asm/sections.h> #include <asm/cacheflush.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <hv/hypervisor.h> #include <arch/interrupts.h> /* <linux/smp.h> doesn't provide this definition. */ #ifndef CONFIG_SMP #define setup_max_cpus 1 #endif static inline int ABS(int x) { return x >= 0 ? x : -x; } /* Chip information */ char chip_model[64] __write_once; struct pglist_data node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); /* We only create bootmem data on node 0. */ static bootmem_data_t __initdata node0_bdata; /* Information on the NUMA nodes that we compute early */ unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; unsigned long __initdata node_memmap_pfn[MAX_NUMNODES]; unsigned long __initdata node_percpu_pfn[MAX_NUMNODES]; unsigned long __initdata node_free_pfn[MAX_NUMNODES]; static unsigned long __initdata node_percpu[MAX_NUMNODES]; #ifdef CONFIG_HIGHMEM /* Page frame index of end of lowmem on each controller. 
*/ unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES]; /* Number of pages that can be mapped into lowmem. */ static unsigned long __initdata mappable_physpages; #endif /* Data on which physical memory controller corresponds to which NUMA node */ int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 }; #ifdef CONFIG_HIGHMEM /* Map information from VAs to PAs */ unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)] __write_once __attribute__((aligned(L2_CACHE_BYTES))); EXPORT_SYMBOL(pbase_map); /* Map information from PAs to VAs */ void *vbase_map[NR_PA_HIGHBIT_VALUES] __write_once __attribute__((aligned(L2_CACHE_BYTES))); EXPORT_SYMBOL(vbase_map); #endif /* Node number as a function of the high PA bits */ int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once; EXPORT_SYMBOL(highbits_to_node); static unsigned int __initdata maxmem_pfn = -1U; static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1U }; static nodemask_t __initdata isolnodes; #ifdef CONFIG_PCI enum { DEFAULT_PCI_RESERVE_MB = 64 }; static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; unsigned long __initdata pci_reserve_start_pfn = -1U; unsigned long __initdata pci_reserve_end_pfn = -1U; #endif static int __init setup_maxmem(char *str) { unsigned long long maxmem; if (str == NULL || (maxmem = memparse(str, NULL)) == 0) return -EINVAL; maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used to no more than %dMB\n", maxmem_pfn >> (20 - PAGE_SHIFT)); return 0; } early_param("maxmem", setup_maxmem); static int __init setup_maxnodemem(char *str) { char *endp; unsigned long long maxnodemem; long node; node = str ? 
simple_strtoul(str, &endp, 0) : INT_MAX; if (node >= MAX_NUMNODES || *endp != ':') return -EINVAL; maxnodemem = memparse(endp+1, NULL); maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used on node %ld to no more than %dMB\n", node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); return 0; } early_param("maxnodemem", setup_maxnodemem); static int __init setup_isolnodes(char *str) { char buf[MAX_NUMNODES * 5]; if (str == NULL || nodelist_parse(str, isolnodes) != 0) return -EINVAL; nodelist_scnprintf(buf, sizeof(buf), isolnodes); pr_info("Set isolnodes value to '%s'\n", buf); return 0; } early_param("isolnodes", setup_isolnodes); #ifdef CONFIG_PCI static int __init setup_pci_reserve(char* str) { unsigned long mb; if (str == NULL || strict_strtoul(str, 0, &mb) != 0 || mb > 3 * 1024) return -EINVAL; pci_reserve_mb = mb; pr_info("Reserving %dMB for PCIE root complex mappings\n", pci_reserve_mb); return 0; } early_param("pci_reserve", setup_pci_reserve); #endif #ifndef __tilegx__ /* * vmalloc=size forces the vmalloc area to be exactly 'size' bytes. * This can be used to increase (or decrease) the vmalloc area. */ static int __init parse_vmalloc(char *arg) { if (!arg) return -EINVAL; VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK; /* See validate_va() for more on this test. */ if ((long)_VMALLOC_START >= 0) early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n", VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL); return 0; } early_param("vmalloc", parse_vmalloc); #endif #ifdef CONFIG_HIGHMEM /* * Determine for each controller where its lowmem is mapped and how much of * it is mapped there. On controller zero, the first few megabytes are * already mapped in as code at MEM_SV_INTRPT, so in principle we could * start our data mappings higher up, but for now we don't bother, to avoid * additional confusion. 
* * One question is whether, on systems with more than 768 Mb and * controllers of different sizes, to map in a proportionate amount of * each one, or to try to map the same amount from each controller. * (E.g. if we have three controllers with 256MB, 1GB, and 256MB * respectively, do we map 256MB from each, or do we map 128 MB, 512 * MB, and 128 MB respectively?) For now we use a proportionate * solution like the latter. * * The VA/PA mapping demands that we align our decisions at 16 MB * boundaries so that we can rapidly convert VA to PA. */ static void *__init setup_pa_va_mapping(void) { unsigned long curr_pages = 0; unsigned long vaddr = PAGE_OFFSET; nodemask_t highonlynodes = isolnodes; int i, j; memset(pbase_map, -1, sizeof(pbase_map)); memset(vbase_map, -1, sizeof(vbase_map)); /* Node zero cannot be isolated for LOWMEM purposes. */ node_clear(0, highonlynodes); /* Count up the number of pages on non-highonlynodes controllers. */ mappable_physpages = 0; for_each_online_node(i) { if (!node_isset(i, highonlynodes)) mappable_physpages += node_end_pfn[i] - node_start_pfn[i]; } for_each_online_node(i) { unsigned long start = node_start_pfn[i]; unsigned long end = node_end_pfn[i]; unsigned long size = end - start; unsigned long vaddr_end; if (node_isset(i, highonlynodes)) { /* Mark this controller as having no lowmem. 
*/ node_lowmem_end_pfn[i] = start; continue; } curr_pages += size; if (mappable_physpages > MAXMEM_PFN) { vaddr_end = PAGE_OFFSET + (((u64)curr_pages * MAXMEM_PFN / mappable_physpages) << PAGE_SHIFT); } else { vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT); } for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) { unsigned long this_pfn = start + (j << HUGETLB_PAGE_ORDER); pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn; if (vbase_map[__pfn_to_highbits(this_pfn)] == (void *)-1) vbase_map[__pfn_to_highbits(this_pfn)] = (void *)(vaddr & HPAGE_MASK); } node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER); BUG_ON(node_lowmem_end_pfn[i] > end); } /* Return highest address of any mapped memory. */ return (void *)vaddr; } #endif /* CONFIG_HIGHMEM */ /* * Register our most important memory mappings with the debug stub. * * This is up to 4 mappings for lowmem, one mapping per memory * controller, plus one for our text segment. */ static void __cpuinit store_permanent_mappings(void) { int i; for_each_online_node(i) { HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT; #ifdef CONFIG_HIGHMEM HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i]; #else HV_PhysAddr high_mapped_pa = node_end_pfn[i]; #endif unsigned long pages = high_mapped_pa - node_start_pfn[i]; HV_VirtAddr addr = (HV_VirtAddr) __va(pa); hv_store_mapping(addr, pages << PAGE_SHIFT, pa); } hv_store_mapping((HV_VirtAddr)_stext, (uint32_t)(_einittext - _stext), 0); } /* * Use hv_inquire_physical() to populate node_{start,end}_pfn[] * and node_online_map, doing suitable sanity-checking. * Also set min_low_pfn, max_low_pfn, and max_pfn. 
*/ static void __init setup_memory(void) { int i, j; int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 }; #ifdef CONFIG_HIGHMEM long highmem_pages; #endif #ifndef __tilegx__ int cap; #endif #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) long lowmem_pages; #endif /* We are using a char to hold the cpu_2_node[] mapping */ BUILD_BUG_ON(MAX_NUMNODES > 127); /* Discover the ranges of memory available to us */ for (i = 0; ; ++i) { unsigned long start, size, end, highbits; HV_PhysAddrRange range = hv_inquire_physical(i); if (range.size == 0) break; #ifdef CONFIG_FLATMEM if (i > 0) { pr_err("Can't use discontiguous PAs: %#llx..%#llx\n", range.size, range.start + range.size); continue; } #endif #ifndef __tilegx__ if ((unsigned long)range.start) { pr_err("Range not at 4GB multiple: %#llx..%#llx\n", range.start, range.start + range.size); continue; } #endif if ((range.start & (HPAGE_SIZE-1)) != 0 || (range.size & (HPAGE_SIZE-1)) != 0) { unsigned long long start_pa = range.start; unsigned long long orig_size = range.size; range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; range.size -= (range.start - start_pa); range.size &= HPAGE_MASK; pr_err("Range not hugepage-aligned: %#llx..%#llx:" " now %#llx-%#llx\n", start_pa, start_pa + orig_size, range.start, range.start + range.size); } highbits = __pa_to_highbits(range.start); if (highbits >= NR_PA_HIGHBIT_VALUES) { pr_err("PA high bits too high: %#llx..%#llx\n", range.start, range.start + range.size); continue; } if (highbits_seen[highbits]) { pr_err("Range overlaps in high bits: %#llx..%#llx\n", range.start, range.start + range.size); continue; } highbits_seen[highbits] = 1; if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { int max_size = maxnodemem_pfn[i]; if (max_size > 0) { pr_err("Maxnodemem reduced node %d to" " %d pages\n", i, max_size); range.size = PFN_PHYS(max_size); } else { pr_err("Maxnodemem disabled node %d\n", i); continue; } } if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { int max_size = maxmem_pfn - 
num_physpages; if (max_size > 0) { pr_err("Maxmem reduced node %d to %d pages\n", i, max_size); range.size = PFN_PHYS(max_size); } else { pr_err("Maxmem disabled node %d\n", i); continue; } } if (i >= MAX_NUMNODES) { pr_err("Too many PA nodes (#%d): %#llx...%#llx\n", i, range.size, range.size + range.start); continue; } start = range.start >> PAGE_SHIFT; size = range.size >> PAGE_SHIFT; end = start + size; #ifndef __tilegx__ if (((HV_PhysAddr)end << PAGE_SHIFT) != (range.start + range.size)) { pr_err("PAs too high to represent: %#llx..%#llx\n", range.start, range.start + range.size); continue; } #endif #ifdef CONFIG_PCI /* * Blocks that overlap the pci reserved region must * have enough space to hold the maximum percpu data * region at the top of the range. If there isn't * enough space above the reserved region, just * truncate the node. */ if (start <= pci_reserve_start_pfn && end > pci_reserve_start_pfn) { unsigned int per_cpu_size = __per_cpu_end - __per_cpu_start; unsigned int percpu_pages = NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); if (end < pci_reserve_end_pfn + percpu_pages) { end = pci_reserve_start_pfn; pr_err("PCI mapping region reduced node %d to" " %ld pages\n", i, end - start); } } #endif for (j = __pfn_to_highbits(start); j <= __pfn_to_highbits(end - 1); j++) highbits_to_node[j] = i; node_start_pfn[i] = start; node_end_pfn[i] = end; node_controller[i] = range.controller; num_physpages += size; max_pfn = end; /* Mark node as online */ node_set(i, node_online_map); node_set(i, node_possible_map); } #ifndef __tilegx__ /* * For 4KB pages, mem_map "struct page" data is 1% of the size * of the physical memory, so can be quite big (640 MB for * four 16G zones). These structures must be mapped in * lowmem, and since we currently cap out at about 768 MB, * it's impractical to try to use this much address space. * For now, arbitrarily cap the amount of physical memory * we're willing to use at 8 million pages (32GB of 4KB pages). 
*/ cap = 8 * 1024 * 1024; /* 8 million pages */ if (num_physpages > cap) { int num_nodes = num_online_nodes(); int cap_each = cap / num_nodes; unsigned long dropped_pages = 0; for (i = 0; i < num_nodes; ++i) { int size = node_end_pfn[i] - node_start_pfn[i]; if (size > cap_each) { dropped_pages += (size - cap_each); node_end_pfn[i] = node_start_pfn[i] + cap_each; } } num_physpages -= dropped_pages; pr_warning("Only using %ldMB memory;" " ignoring %ldMB.\n", num_physpages >> (20 - PAGE_SHIFT), dropped_pages >> (20 - PAGE_SHIFT)); pr_warning("Consider using a larger page size.\n"); } #endif /* Heap starts just above the last loaded address. */ min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET); #ifdef CONFIG_HIGHMEM /* Find where we map lowmem from each controller. */ high_memory = setup_pa_va_mapping(); /* Set max_low_pfn based on what node 0 can directly address. */ max_low_pfn = node_lowmem_end_pfn[0]; lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? MAXMEM_PFN : mappable_physpages; highmem_pages = (long) (num_physpages - lowmem_pages); pr_notice("%ldMB HIGHMEM available.\n", pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); pr_notice("%ldMB LOWMEM available.\n", pages_to_mb(lowmem_pages)); #else /* Set max_low_pfn based on what node 0 can directly address. 
*/ max_low_pfn = node_end_pfn[0]; #ifndef __tilegx__ if (node_end_pfn[0] > MAXMEM_PFN) { pr_warning("Only using %ldMB LOWMEM.\n", MAXMEM>>20); pr_warning("Use a HIGHMEM enabled kernel.\n"); max_low_pfn = MAXMEM_PFN; max_pfn = MAXMEM_PFN; num_physpages = MAXMEM_PFN; node_end_pfn[0] = MAXMEM_PFN; } else { pr_notice("%ldMB memory available.\n", pages_to_mb(node_end_pfn[0])); } for (i = 1; i < MAX_NUMNODES; ++i) { node_start_pfn[i] = 0; node_end_pfn[i] = 0; } high_memory = __va(node_end_pfn[0]); #else lowmem_pages = 0; for (i = 0; i < MAX_NUMNODES; ++i) { int pages = node_end_pfn[i] - node_start_pfn[i]; lowmem_pages += pages; if (pages) high_memory = pfn_to_kaddr(node_end_pfn[i]); } pr_notice("%ldMB memory available.\n", pages_to_mb(lowmem_pages)); #endif #endif } static void __init setup_bootmem_allocator(void) { unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn; /* Provide a node 0 bdata. */ NODE_DATA(0)->bdata = &node0_bdata; #ifdef CONFIG_PCI /* Don't let boot memory alias the PCI region. */ last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn); #else last_alloc_pfn = max_low_pfn; #endif /* * Initialize the boot-time allocator (with low memory only): * The first argument says where to put the bitmap, and the * second says where the end of allocatable memory is. */ bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn); /* * Let the bootmem allocator use all the space we've given it * except for its own bitmap. 
*/ first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size); if (first_alloc_pfn >= last_alloc_pfn) early_panic("Not enough memory on controller 0 for bootmem\n"); free_bootmem(PFN_PHYS(first_alloc_pfn), PFN_PHYS(last_alloc_pfn - first_alloc_pfn)); #ifdef CONFIG_KEXEC if (crashk_res.start != crashk_res.end) reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0); #endif } void *__init alloc_remap(int nid, unsigned long size) { int pages = node_end_pfn[nid] - node_start_pfn[nid]; void *map = pfn_to_kaddr(node_memmap_pfn[nid]); BUG_ON(size != pages * sizeof(struct page)); memset(map, 0, size); return map; } static int __init percpu_size(void) { int size = __per_cpu_end - __per_cpu_start; size += PERCPU_MODULE_RESERVE; size += PERCPU_DYNAMIC_EARLY_SIZE; if (size < PCPU_MIN_UNIT_SIZE) size = PCPU_MIN_UNIT_SIZE; size = roundup(size, PAGE_SIZE); /* In several places we assume the per-cpu data fits on a huge page. */ BUG_ON(kdata_huge && size > HPAGE_SIZE); return size; } static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal) { void *kva = __alloc_bootmem(size, PAGE_SIZE, goal); unsigned long pfn = kaddr_to_pfn(kva); BUG_ON(goal && PFN_PHYS(pfn) != goal); return pfn; } static void __init zone_sizes_init(void) { unsigned long zones_size[MAX_NR_ZONES] = { 0 }; int size = percpu_size(); int num_cpus = smp_height * smp_width; int i; for (i = 0; i < num_cpus; ++i) node_percpu[cpu_to_node(i)] += size; for_each_online_node(i) { unsigned long start = node_start_pfn[i]; unsigned long end = node_end_pfn[i]; #ifdef CONFIG_HIGHMEM unsigned long lowmem_end = node_lowmem_end_pfn[i]; #else unsigned long lowmem_end = end; #endif int memmap_size = (end - start) * sizeof(struct page); node_free_pfn[i] = start; /* * Set aside pages for per-cpu data and the mem_map array. * * Since the per-cpu data requires special homecaching, * if we are in kdata_huge mode, we put it at the end of * the lowmem region. 
If we're not in kdata_huge mode, * we take the per-cpu pages from the bottom of the * controller, since that avoids fragmenting a huge page * that users might want. We always take the memmap * from the bottom of the controller, since with * kdata_huge that lets it be under a huge TLB entry. * * If the user has requested isolnodes for a controller, * though, there'll be no lowmem, so we just alloc_bootmem * the memmap. There will be no percpu memory either. */ if (__pfn_to_highbits(start) == 0) { /* In low PAs, allocate via bootmem. */ unsigned long goal = 0; node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, goal); if (kdata_huge) goal = PFN_PHYS(lowmem_end) - node_percpu[i]; if (node_percpu[i]) node_percpu_pfn[i] = alloc_bootmem_pfn(node_percpu[i], goal); } else if (cpu_isset(i, isolnodes)) { node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0); BUG_ON(node_percpu[i] != 0); } else { /* In high PAs, just reserve some pages. */ node_memmap_pfn[i] = node_free_pfn[i]; node_free_pfn[i] += PFN_UP(memmap_size); if (!kdata_huge) { node_percpu_pfn[i] = node_free_pfn[i]; node_free_pfn[i] += PFN_UP(node_percpu[i]); } else { node_percpu_pfn[i] = lowmem_end - PFN_UP(node_percpu[i]); } } #ifdef CONFIG_HIGHMEM if (start > lowmem_end) { zones_size[ZONE_NORMAL] = 0; zones_size[ZONE_HIGHMEM] = end - start; } else { zones_size[ZONE_NORMAL] = lowmem_end - start; zones_size[ZONE_HIGHMEM] = end - lowmem_end; } #else zones_size[ZONE_NORMAL] = end - start; #endif /* * Everyone shares node 0's bootmem allocator, but * we use alloc_remap(), above, to put the actual * struct page array on the individual controllers, * which is most of the data that we actually care about. * We can't place bootmem allocators on the other * controllers since the bootmem allocator can only * operate on 32-bit physical addresses. 
*/ NODE_DATA(i)->bdata = NODE_DATA(0)->bdata; free_area_init_node(i, zones_size, start, NULL); printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n", PFN_UP(node_percpu[i])); /* Track the type of memory on each node */ if (zones_size[ZONE_NORMAL]) node_set_state(i, N_NORMAL_MEMORY); #ifdef CONFIG_HIGHMEM if (end != start) node_set_state(i, N_HIGH_MEMORY); #endif node_set_online(i); } } #ifdef CONFIG_NUMA /* which logical CPUs are on which nodes */ struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once; EXPORT_SYMBOL(node_2_cpu_mask); /* which node each logical CPU is on */ char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES))); EXPORT_SYMBOL(cpu_2_node); /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */ static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus) { if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus)) return -1; else return cpu_to_node(cpu); } /* Return number of immediately-adjacent tiles sharing the same NUMA node. 
*/ static int __init node_neighbors(int node, int cpu, struct cpumask *unbound_cpus) { int neighbors = 0; int w = smp_width; int h = smp_height; int x = cpu % w; int y = cpu / w; if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node) ++neighbors; if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node) ++neighbors; if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node) ++neighbors; if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node) ++neighbors; return neighbors; } static void __init setup_numa_mapping(void) { int distance[MAX_NUMNODES][NR_CPUS]; HV_Coord coord; int cpu, node, cpus, i, x, y; int num_nodes = num_online_nodes(); struct cpumask unbound_cpus; nodemask_t default_nodes; cpumask_clear(&unbound_cpus); /* Get set of nodes we will use for defaults */ nodes_andnot(default_nodes, node_online_map, isolnodes); if (nodes_empty(default_nodes)) { BUG_ON(!node_isset(0, node_online_map)); pr_err("Forcing NUMA node zero available as a default node\n"); node_set(0, default_nodes); } /* Populate the distance[] array */ memset(distance, -1, sizeof(distance)); cpu = 0; for (coord.y = 0; coord.y < smp_height; ++coord.y) { for (coord.x = 0; coord.x < smp_width; ++coord.x, ++cpu) { BUG_ON(cpu >= nr_cpu_ids); if (!cpu_possible(cpu)) { cpu_2_node[cpu] = -1; continue; } for_each_node_mask(node, default_nodes) { HV_MemoryControllerInfo info = hv_inquire_memory_controller( coord, node_controller[node]); distance[node][cpu] = ABS(info.coord.x) + ABS(info.coord.y); } cpumask_set_cpu(cpu, &unbound_cpus); } } cpus = cpu; /* * Round-robin through the NUMA nodes until all the cpus are * assigned. We could be more clever here (e.g. create four * sorted linked lists on the same set of cpu nodes, and pull * off them in round-robin sequence, removing from all four * lists each time) but given the relatively small numbers * involved, O(n^2) seem OK for a one-time cost. 
*/ node = first_node(default_nodes); while (!cpumask_empty(&unbound_cpus)) { int best_cpu = -1; int best_distance = INT_MAX; for (cpu = 0; cpu < cpus; ++cpu) { if (cpumask_test_cpu(cpu, &unbound_cpus)) { /* * Compute metric, which is how much * closer the cpu is to this memory * controller than the others, shifted * up, and then the number of * neighbors already in the node as an * epsilon adjustment to try to keep * the nodes compact. */ int d = distance[node][cpu] * num_nodes; for_each_node_mask(i, default_nodes) { if (i != node) d -= distance[i][cpu]; } d *= 8; /* allow space for epsilon */ d -= node_neighbors(node, cpu, &unbound_cpus); if (d < best_distance) { best_cpu = cpu; best_distance = d; } } } BUG_ON(best_cpu < 0); cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]); cpu_2_node[best_cpu] = node; cpumask_clear_cpu(best_cpu, &unbound_cpus); node = next_node(node, default_nodes); if (node == MAX_NUMNODES) node = first_node(default_nodes); } /* Print out node assignments and set defaults for disabled cpus */ cpu = 0; for (y = 0; y < smp_height; ++y) { printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y); for (x = 0; x < smp_width; ++x, ++cpu) { if (cpu_to_node(cpu) < 0) { pr_cont(" -"); cpu_2_node[cpu] = first_node(default_nodes); } else { pr_cont(" %d", cpu_to_node(cpu)); } } pr_cont("\n"); } } static struct cpu cpu_devices[NR_CPUS]; static int __init topology_init(void) { int i; for_each_online_node(i) register_one_node(i); for (i = 0; i < smp_height * smp_width; ++i) register_cpu(&cpu_devices[i], i); return 0; } subsys_initcall(topology_init); #else /* !CONFIG_NUMA */ #define setup_numa_mapping() do { } while (0) #endif /* CONFIG_NUMA */ /** * setup_cpu() - Do all necessary per-cpu, tile-specific initialization. * @boot: Is this the boot cpu? * * Called from setup_arch() on the boot cpu, or online_secondary(). */ void __cpuinit setup_cpu(int boot) { /* The boot cpu sets up its permanent mappings much earlier. 
*/ if (!boot) store_permanent_mappings(); /* Allow asynchronous TLB interrupts. */ #if CHIP_HAS_TILE_DMA() arch_local_irq_unmask(INT_DMATLB_MISS); arch_local_irq_unmask(INT_DMATLB_ACCESS); #endif #if CHIP_HAS_SN_PROC() arch_local_irq_unmask(INT_SNITLB_MISS); #endif #ifdef __tilegx__ arch_local_irq_unmask(INT_SINGLE_STEP_K); #endif /* * Allow user access to many generic SPRs, like the cycle * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc. */ __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1); #if CHIP_HAS_SN() /* Static network is not restricted. */ __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1); #endif #if CHIP_HAS_SN_PROC() __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1); __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1); #endif /* * Set the MPL for interrupt control 0 & 1 to the corresponding * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT * SPRs, as well as the interrupt mask. */ __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1); /* Initialize IRQ support for this cpu. */ setup_irq_regs(); #ifdef CONFIG_HARDWALL /* Reset the network state on this cpu. */ reset_network_state(); #endif } #ifdef CONFIG_BLK_DEV_INITRD /* * Note that the kernel can potentially support other compression * techniques than gz, though we don't do so by default. If we ever * decide to do so we can either look for other filename extensions, * or just allow a file with this name to be compressed with an * arbitrary compressor (somewhat counterintuitively). */ static int __initdata set_initramfs_file; static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; static int __init setup_initramfs_file(char *str) { if (str == NULL) return -EINVAL; strncpy(initramfs_file, str, sizeof(initramfs_file) - 1); set_initramfs_file = 1; return 0; } early_param("initramfs_file", setup_initramfs_file); /* * We look for an "initramfs.cpio.gz" file in the hvfs. * If there is one, we allocate some memory for it and it will be * unpacked to the initramfs. 
*/ static void __init load_hv_initrd(void) { HV_FS_StatInfo stat; int fd, rc; void *initrd; fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { if (set_initramfs_file) pr_warning("No such hvfs initramfs file '%s'\n", initramfs_file); return; } BUG_ON(fd < 0); stat = hv_fs_fstat(fd); BUG_ON(stat.size < 0); if (stat.flags & HV_FS_ISDIR) { pr_warning("Ignoring hvfs file '%s': it's a directory.\n", initramfs_file); return; } initrd = alloc_bootmem_pages(stat.size); rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0); if (rc != stat.size) { pr_err("Error reading %d bytes from hvfs file '%s': %d\n", stat.size, initramfs_file, rc); free_initrd_mem((unsigned long) initrd, stat.size); return; } initrd_start = (unsigned long) initrd; initrd_end = initrd_start + stat.size; } void __init free_initrd_mem(unsigned long begin, unsigned long end) { free_bootmem(__pa(begin), end - begin); } #else static inline void load_hv_initrd(void) {} #endif /* CONFIG_BLK_DEV_INITRD */ static void __init validate_hv(void) { /* * It may already be too late, but let's check our built-in * configuration against what the hypervisor is providing. 
*/ unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE); int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL); int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE); HV_ASIDRange asid_range; #ifndef CONFIG_SMP HV_Topology topology = hv_inquire_topology(); BUG_ON(topology.coord.x != 0 || topology.coord.y != 0); if (topology.width != 1 || topology.height != 1) { pr_warning("Warning: booting UP kernel on %dx%d grid;" " will ignore all but first tile.\n", topology.width, topology.height); } #endif if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text) early_panic("Hypervisor glue size %ld is too big!\n", glue_size); if (hv_page_size != PAGE_SIZE) early_panic("Hypervisor page size %#x != our %#lx\n", hv_page_size, PAGE_SIZE); if (hv_hpage_size != HPAGE_SIZE) early_panic("Hypervisor huge page size %#x != our %#lx\n", hv_hpage_size, HPAGE_SIZE); #ifdef CONFIG_SMP /* * Some hypervisor APIs take a pointer to a bitmap array * whose size is at least the number of cpus on the chip. * We use a struct cpumask for this, so it must be big enough. */ if ((smp_height * smp_width) > nr_cpu_ids) early_panic("Hypervisor %d x %d grid too big for Linux" " NR_CPUS %d\n", smp_height, smp_width, nr_cpu_ids); #endif /* * Check that we're using allowed ASIDs, and initialize the * various asid variables to their appropriate initial states. */ asid_range = hv_inquire_asid(0); __get_cpu_var(current_asid) = min_asid = asid_range.start; max_asid = asid_range.start + asid_range.size - 1; if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, sizeof(chip_model)) < 0) { pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n"); strlcpy(chip_model, "unknown", sizeof(chip_model)); } } static void __init validate_va(void) { #ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */ /* * Similarly, make sure we're only using allowed VAs. * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT, * and 0 .. KERNEL_HIGH_VADDR. 
* In addition, make sure we CAN'T use the end of memory, since * we use the last chunk of each pgd for the pgd_list. */ int i, user_kernel_ok = 0; unsigned long max_va = 0; unsigned long list_va = ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); for (i = 0; ; ++i) { HV_VirtAddrRange range = hv_inquire_virtual(i); if (range.size == 0) break; if (range.start <= MEM_USER_INTRPT && range.start + range.size >= MEM_HV_INTRPT) user_kernel_ok = 1; if (range.start == 0) max_va = range.size; BUG_ON(range.start + range.size > list_va); } if (!user_kernel_ok) early_panic("Hypervisor not configured for user/kernel VAs\n"); if (max_va == 0) early_panic("Hypervisor not configured for low VAs\n"); if (max_va < KERNEL_HIGH_VADDR) early_panic("Hypervisor max VA %#lx smaller than %#lx\n", max_va, KERNEL_HIGH_VADDR); /* Kernel PCs must have their high bit set; see intvec.S. */ if ((long)VMALLOC_START >= 0) early_panic( "Linux VMALLOC region below the 2GB line (%#lx)!\n" "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" "or smaller VMALLOC_RESERVE.\n", VMALLOC_START); #endif } /* * cpu_lotar_map lists all the cpus that are valid for the supervisor * to cache data on at a page level, i.e. what cpus can be placed in * the LOTAR field of a PTE. It is equivalent to the set of possible * cpus plus any other cpus that are willing to share their cache. * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR). */ struct cpumask __write_once cpu_lotar_map; EXPORT_SYMBOL(cpu_lotar_map); #if CHIP_HAS_CBOX_HOME_MAP() /* * hash_for_home_map lists all the tiles that hash-for-home data * will be cached on. Note that this may includes tiles that are not * valid for this supervisor to use otherwise (e.g. if a hypervisor * device is being shared between multiple supervisors). * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE). */ struct cpumask hash_for_home_map; EXPORT_SYMBOL(hash_for_home_map); #endif /* * cpu_cacheable_map lists all the cpus whose caches the hypervisor can * flush on our behalf. 
It is set to cpu_possible_mask OR'ed with * hash_for_home_map, and it is what should be passed to * hv_flush_remote() to flush all caches. Note that if there are * dedicated hypervisor driver tiles that have authorized use of their * cache, those tiles will only appear in cpu_lotar_map, NOT in * cpu_cacheable_map, as they are a special case. */ struct cpumask __write_once cpu_cacheable_map; EXPORT_SYMBOL(cpu_cacheable_map); static __initdata struct cpumask disabled_map; static int __init disabled_cpus(char *str) { int boot_cpu = smp_processor_id(); if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0) return -EINVAL; if (cpumask_test_cpu(boot_cpu, &disabled_map)) { pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu); cpumask_clear_cpu(boot_cpu, &disabled_map); } return 0; } early_param("disabled_cpus", disabled_cpus); void __init print_disabled_cpus(void) { if (!cpumask_empty(&disabled_map)) { char buf[100]; cpulist_scnprintf(buf, sizeof(buf), &disabled_map); pr_info("CPUs not available for Linux: %s\n", buf); } } static void __init setup_cpu_maps(void) { struct cpumask hv_disabled_map, cpu_possible_init; int boot_cpu = smp_processor_id(); int cpus, i, rc; /* Learn which cpus are allowed by the hypervisor. */ rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL, (HV_VirtAddr) cpumask_bits(&cpu_possible_init), sizeof(cpu_cacheable_map)); if (rc < 0) early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc); if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init)) early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu); /* Compute the cpus disabled by the hvconfig file. */ cpumask_complement(&hv_disabled_map, &cpu_possible_init); /* Include them with the cpus disabled by "disabled_cpus". */ cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map); /* * Disable every cpu after "setup_max_cpus". But don't mark * as disabled the cpus that are outside of our initial rectangle, * since that turns out to be confusing. 
*/ cpus = 1; /* this cpu */ cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */ for (i = 0; cpus < setup_max_cpus; ++i) if (!cpumask_test_cpu(i, &disabled_map)) ++cpus; for (; i < smp_height * smp_width; ++i) cpumask_set_cpu(i, &disabled_map); cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */ for (i = smp_height * smp_width; i < NR_CPUS; ++i) cpumask_clear_cpu(i, &disabled_map); /* * Setup cpu_possible map as every cpu allocated to us, minus * the results of any "disabled_cpus" settings. */ cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map); init_cpu_possible(&cpu_possible_init); /* Learn which cpus are valid for LOTAR caching. */ rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR, (HV_VirtAddr) cpumask_bits(&cpu_lotar_map), sizeof(cpu_lotar_map)); if (rc < 0) { pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n"); cpu_lotar_map = *cpu_possible_mask; } #if CHIP_HAS_CBOX_HOME_MAP() /* Retrieve set of CPUs used for hash-for-home caching */ rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE, (HV_VirtAddr) hash_for_home_map.bits, sizeof(hash_for_home_map)); if (rc < 0) early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc); cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map); #else cpu_cacheable_map = *cpu_possible_mask; #endif } static int __init dataplane(char *str) { pr_warning("WARNING: dataplane support disabled in this kernel\n"); return 0; } early_param("dataplane", dataplane); #ifdef CONFIG_CMDLINE_BOOL static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; #endif void __init setup_arch(char **cmdline_p) { int len; #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) len = hv_get_command_line((HV_VirtAddr) boot_command_line, COMMAND_LINE_SIZE); if (boot_command_line[0]) pr_warning("WARNING: ignoring dynamic command line \"%s\"\n", boot_command_line); strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); #else char *hv_cmdline; #if 
defined(CONFIG_CMDLINE_BOOL) if (builtin_cmdline[0]) { int builtin_len = strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); if (builtin_len < COMMAND_LINE_SIZE-1) boot_command_line[builtin_len++] = ' '; hv_cmdline = &boot_command_line[builtin_len]; len = COMMAND_LINE_SIZE - builtin_len; } else #endif { hv_cmdline = boot_command_line; len = COMMAND_LINE_SIZE; } len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len); if (len < 0 || len > COMMAND_LINE_SIZE) early_panic("hv_get_command_line failed: %d\n", len); #endif *cmdline_p = boot_command_line; /* Set disabled_map and setup_max_cpus very early */ parse_early_param(); /* Make sure the kernel is compatible with the hypervisor. */ validate_hv(); validate_va(); setup_cpu_maps(); #ifdef CONFIG_PCI /* * Initialize the PCI structures. This is done before memory * setup so that we know whether or not a pci_reserve region * is necessary. */ if (tile_pci_init() == 0) pci_reserve_mb = 0; /* PCI systems reserve a region just below 4GB for mapping iomem. */ pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT)); pci_reserve_start_pfn = pci_reserve_end_pfn - (pci_reserve_mb << (20 - PAGE_SHIFT)); #endif init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; setup_memory(); store_permanent_mappings(); setup_bootmem_allocator(); /* * NOTE: before this point _nobody_ is allowed to allocate * any memory using the bootmem allocator. */ paging_init(); setup_numa_mapping(); zone_sizes_init(); set_page_homes(); setup_cpu(1); setup_clock(); load_hv_initrd(); } /* * Set up per-cpu memory. */ unsigned long __per_cpu_offset[NR_CPUS] __write_once; EXPORT_SYMBOL(__per_cpu_offset); static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 }; static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 }; /* * As the percpu code allocates pages, we return the pages from the * end of the node for the specified cpu. 
*/ static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) { int nid = cpu_to_node(cpu); unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid]; BUG_ON(size % PAGE_SIZE != 0); pfn_offset[nid] += size / PAGE_SIZE; BUG_ON(node_percpu[nid] < size); node_percpu[nid] -= size; if (percpu_pfn[cpu] == 0) percpu_pfn[cpu] = pfn; return pfn_to_kaddr(pfn); } /* * Pages reserved for percpu memory are not freeable, and in any case we are * on a short path to panic() in setup_per_cpu_area() at this point anyway. */ static void __init pcpu_fc_free(void *ptr, size_t size) { } /* * Set up vmalloc page tables using bootmem for the percpu code. */ static void __init pcpu_fc_populate_pte(unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; BUG_ON(pgd_addr_invalid(addr)); if (addr < VMALLOC_START || addr >= VMALLOC_END) panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;" " try increasing CONFIG_VMALLOC_RESERVE\n", addr, VMALLOC_START, VMALLOC_END); pgd = swapper_pg_dir + pgd_index(addr); pud = pud_offset(pgd, addr); BUG_ON(!pud_present(*pud)); pmd = pmd_offset(pud, addr); if (pmd_present(*pmd)) { BUG_ON(pmd_huge_page(*pmd)); } else { pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0); pmd_populate_kernel(&init_mm, pmd, pte); } } void __init setup_per_cpu_areas(void) { struct page *pg; unsigned long delta, pfn, lowmem_va; unsigned long size = percpu_size(); char *ptr; int rc, cpu, i; rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc, pcpu_fc_free, pcpu_fc_populate_pte); if (rc < 0) panic("Cannot initialize percpu area (err=%d)", rc); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; /* finv the copy out of cache so we can change homecache */ ptr = pcpu_base_addr + pcpu_unit_offsets[cpu]; __finv_buffer(ptr, size); pfn = percpu_pfn[cpu]; /* Rewrite the page tables to cache on that cpu */ pg = 
pfn_to_page(pfn); for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) { /* Update the vmalloc mapping and page home. */ pte_t *ptep = virt_to_pte(NULL, (unsigned long)ptr + i); pte_t pte = *ptep; BUG_ON(pfn != pte_pfn(pte)); pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); pte = set_remote_cache_cpu(pte, cpu); set_pte(ptep, pte); /* Update the lowmem mapping for consistency. */ lowmem_va = (unsigned long)pfn_to_kaddr(pfn); ptep = virt_to_pte(NULL, lowmem_va); if (pte_huge(*ptep)) { printk(KERN_DEBUG "early shatter of huge page" " at %#lx\n", lowmem_va); shatter_pmd((pmd_t *)ptep); ptep = virt_to_pte(NULL, lowmem_va); BUG_ON(pte_huge(*ptep)); } BUG_ON(pfn != pte_pfn(*ptep)); set_pte(ptep, pte); } } /* Set our thread pointer appropriately. */ set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]); /* Make sure the finv's have completed. */ mb_incoherent(); /* Flush the TLB so we reference it properly from here on out. */ local_flush_tlb_all(); } static struct resource data_resource = { .name = "Kernel data", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource code_resource = { .name = "Kernel code", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; /* * We reserve all resources above 4GB so that PCI won't try to put * mappings above 4GB; the standard allows that for some devices but * the probing code trunates values to 32 bits. 
*/ #ifdef CONFIG_PCI static struct resource* __init insert_non_bus_resource(void) { struct resource *res = kzalloc(sizeof(struct resource), GFP_ATOMIC); res->name = "Non-Bus Physical Address Space"; res->start = (1ULL << 32); res->end = -1LL; res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; if (insert_resource(&iomem_resource, res)) { kfree(res); return NULL; } return res; } #endif static struct resource* __init insert_ram_resource(u64 start_pfn, u64 end_pfn) { struct resource *res = kzalloc(sizeof(struct resource), GFP_ATOMIC); res->name = "System RAM"; res->start = start_pfn << PAGE_SHIFT; res->end = (end_pfn << PAGE_SHIFT) - 1; res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; if (insert_resource(&iomem_resource, res)) { kfree(res); return NULL; } return res; } /* * Request address space for all standard resources * * If the system includes PCI root complex drivers, we need to create * a window just below 4GB where PCI BARs can be mapped. */ static int __init request_standard_resources(void) { int i; enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; iomem_resource.end = -1LL; #ifdef CONFIG_PCI insert_non_bus_resource(); #endif for_each_online_node(i) { u64 start_pfn = node_start_pfn[i]; u64 end_pfn = node_end_pfn[i]; #ifdef CONFIG_PCI if (start_pfn <= pci_reserve_start_pfn && end_pfn > pci_reserve_start_pfn) { if (end_pfn > pci_reserve_end_pfn) insert_ram_resource(pci_reserve_end_pfn, end_pfn); end_pfn = pci_reserve_start_pfn; } #endif insert_ram_resource(start_pfn, end_pfn); } code_resource.start = __pa(_text - CODE_DELTA); code_resource.end = __pa(_etext - CODE_DELTA)-1; data_resource.start = __pa(_sdata); data_resource.end = __pa(_end)-1; insert_resource(&iomem_resource, &code_resource); insert_resource(&iomem_resource, &data_resource); #ifdef CONFIG_KEXEC insert_resource(&iomem_resource, &crashk_res); #endif return 0; } subsys_initcall(request_standard_resources);
gpl-2.0
papi92/android_kernel_google_msm
arch/powerpc/kvm/e500.c
4413
6439
/* * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. * * Author: Yu Liu, <yu.liu@freescale.com> * * Description: * This file is derived from arch/powerpc/kvm/44x.c, * by Hollis Blanchard <hollisb@us.ibm.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. */ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> #include <asm/reg.h> #include <asm/cputable.h> #include <asm/tlbflush.h> #include <asm/kvm_e500.h> #include <asm/kvm_ppc.h> #include "booke.h" #include "e500_tlb.h" void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) { } void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) { } void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { kvmppc_e500_tlb_load(vcpu, cpu); } void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { kvmppc_e500_tlb_put(vcpu); #ifdef CONFIG_SPE if (vcpu->arch.shadow_msr & MSR_SPE) kvmppc_vcpu_disable_spe(vcpu); #endif } int kvmppc_core_check_processor_compat(void) { int r; if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0) r = 0; else r = -ENOTSUPP; return r; } int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); kvmppc_e500_tlb_setup(vcpu_e500); /* Registers init */ vcpu->arch.pvr = mfspr(SPRN_PVR); vcpu_e500->svr = mfspr(SPRN_SVR); vcpu->arch.cpu_type = KVM_CPU_E500V2; return 0; } /* 'linear_address' is actually an encoding of AS|PID|EADDR . 
*/ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { int index; gva_t eaddr; u8 pid; u8 as; eaddr = tr->linear_address; pid = (tr->linear_address >> 32) & 0xff; as = (tr->linear_address >> 40) & 0x1; index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); if (index < 0) { tr->valid = 0; return 0; } tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); /* XXX what does "writeable" and "usermode" even mean? */ tr->valid = 1; return 0; } void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE | KVM_SREGS_E_PM; sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL; sregs->u.e.impl.fsl.features = 0; sregs->u.e.impl.fsl.svr = vcpu_e500->svr; sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; sregs->u.e.mas0 = vcpu->arch.shared->mas0; sregs->u.e.mas1 = vcpu->arch.shared->mas1; sregs->u.e.mas2 = vcpu->arch.shared->mas2; sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; sregs->u.e.mas4 = vcpu->arch.shared->mas4; sregs->u.e.mas6 = vcpu->arch.shared->mas6; sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG); sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg; sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg; sregs->u.e.tlbcfg[2] = 0; sregs->u.e.tlbcfg[3] = 0; sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; sregs->u.e.ivor_high[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; kvmppc_get_sregs_ivor(vcpu, sregs); } int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { vcpu_e500->svr = sregs->u.e.impl.fsl.svr; vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0; vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; } if (sregs->u.e.features & 
KVM_SREGS_E_ARCH206_MMU) { vcpu->arch.shared->mas0 = sregs->u.e.mas0; vcpu->arch.shared->mas1 = sregs->u.e.mas1; vcpu->arch.shared->mas2 = sregs->u.e.mas2; vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; vcpu->arch.shared->mas4 = sregs->u.e.mas4; vcpu->arch.shared->mas6 = sregs->u.e.mas6; } if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) return 0; if (sregs->u.e.features & KVM_SREGS_E_SPE) { vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = sregs->u.e.ivor_high[0]; vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = sregs->u.e.ivor_high[1]; vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = sregs->u.e.ivor_high[2]; } if (sregs->u.e.features & KVM_SREGS_E_PM) { vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = sregs->u.e.ivor_high[3]; } return kvmppc_set_sregs_ivor(vcpu, sregs); } struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvmppc_vcpu_e500 *vcpu_e500; struct kvm_vcpu *vcpu; int err; vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu_e500) { err = -ENOMEM; goto out; } vcpu = &vcpu_e500->vcpu; err = kvm_vcpu_init(vcpu, kvm, id); if (err) goto free_vcpu; err = kvmppc_e500_tlb_init(vcpu_e500); if (err) goto uninit_vcpu; vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); if (!vcpu->arch.shared) goto uninit_tlb; return vcpu; uninit_tlb: kvmppc_e500_tlb_uninit(vcpu_e500); uninit_vcpu: kvm_vcpu_uninit(vcpu); free_vcpu: kmem_cache_free(kvm_vcpu_cache, vcpu_e500); out: return ERR_PTR(err); } void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); free_page((unsigned long)vcpu->arch.shared); kvm_vcpu_uninit(vcpu); kvmppc_e500_tlb_uninit(vcpu_e500); kmem_cache_free(kvm_vcpu_cache, vcpu_e500); } static int __init kvmppc_e500_init(void) { int r, i; unsigned long ivor[3]; unsigned long max_ivor = 0; r = kvmppc_core_check_processor_compat(); if (r) return r; r = kvmppc_booke_init(); if (r) return r; /* copy extra E500 exception handlers */ ivor[0] = mfspr(SPRN_IVOR32); ivor[1] 
= mfspr(SPRN_IVOR33); ivor[2] = mfspr(SPRN_IVOR34); for (i = 0; i < 3; i++) { if (ivor[i] > max_ivor) max_ivor = ivor[i]; memcpy((void *)kvmppc_booke_handlers + ivor[i], kvmppc_handlers_start + (i + 16) * kvmppc_handler_len, kvmppc_handler_len); } flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); } static void __exit kvmppc_e500_exit(void) { kvmppc_booke_exit(); } module_init(kvmppc_e500_init); module_exit(kvmppc_e500_exit);
gpl-2.0
yatto/Android_Kernel_ME302KL_NOUGAT
net/ipv4/netfilter/nf_nat_rule.c
4669
5627
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Everything about the rules for NAT. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/ip.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/route.h> #include <linux/bitops.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_core.h> #include <net/netfilter/nf_nat_rule.h> #define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ (1 << NF_INET_POST_ROUTING) | \ (1 << NF_INET_LOCAL_OUT) | \ (1 << NF_INET_LOCAL_IN)) static const struct xt_table nat_table = { .name = "nat", .valid_hooks = NAT_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, }; /* Source NAT */ static unsigned int ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING || par->hooknum == NF_INET_LOCAL_IN); ct = nf_ct_get(skb, &ctinfo); /* Connection must be valid and new. 
*/ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY)); NF_CT_ASSERT(par->out != NULL); return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_SRC); } static unsigned int ipt_dnat_target(struct sk_buff *skb, const struct xt_action_param *par) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_LOCAL_OUT); ct = nf_ct_get(skb, &ctinfo); /* Connection must be valid and new. */ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); return nf_nat_setup_info(ct, &mr->range[0], NF_NAT_MANIP_DST); } static int ipt_snat_checkentry(const struct xt_tgchk_param *par) { const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; /* Must be a valid range */ if (mr->rangesize != 1) { pr_info("SNAT: multiple ranges no longer supported\n"); return -EINVAL; } return 0; } static int ipt_dnat_checkentry(const struct xt_tgchk_param *par) { const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; /* Must be a valid range */ if (mr->rangesize != 1) { pr_info("DNAT: multiple ranges no longer supported\n"); return -EINVAL; } return 0; } static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) { /* Force range to this IP; let proto decide mapping for per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED). */ struct nf_nat_ipv4_range range; range.flags = 0; pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? 
&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip : &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); } int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum, const struct net_device *in, const struct net_device *out, struct nf_conn *ct) { struct net *net = nf_ct_net(ct); int ret; ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table); if (ret == NF_ACCEPT) { if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) /* NUL mapping */ ret = alloc_null_binding(ct, hooknum); } return ret; } static struct xt_target ipt_snat_reg __read_mostly = { .name = "SNAT", .target = ipt_snat_target, .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), .checkentry = ipt_snat_checkentry, .family = AF_INET, }; static struct xt_target ipt_dnat_reg __read_mostly = { .name = "DNAT", .target = ipt_dnat_target, .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), .checkentry = ipt_dnat_checkentry, .family = AF_INET, }; static int __net_init nf_nat_rule_net_init(struct net *net) { struct ipt_replace *repl; repl = ipt_alloc_initial_table(&nat_table); if (repl == NULL) return -ENOMEM; net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl); kfree(repl); if (IS_ERR(net->ipv4.nat_table)) return PTR_ERR(net->ipv4.nat_table); return 0; } static void __net_exit nf_nat_rule_net_exit(struct net *net) { ipt_unregister_table(net, net->ipv4.nat_table); } static struct pernet_operations nf_nat_rule_net_ops = { .init = nf_nat_rule_net_init, .exit = nf_nat_rule_net_exit, }; int __init nf_nat_rule_init(void) { int ret; ret = register_pernet_subsys(&nf_nat_rule_net_ops); if (ret != 0) goto out; ret = xt_register_target(&ipt_snat_reg); if (ret != 0) goto unregister_table; ret = xt_register_target(&ipt_dnat_reg); if (ret != 0) goto unregister_snat; return ret; 
unregister_snat: xt_unregister_target(&ipt_snat_reg); unregister_table: unregister_pernet_subsys(&nf_nat_rule_net_ops); out: return ret; } void nf_nat_rule_cleanup(void) { xt_unregister_target(&ipt_dnat_reg); xt_unregister_target(&ipt_snat_reg); unregister_pernet_subsys(&nf_nat_rule_net_ops); }
gpl-2.0
RomaVis/eeenote-kernel
net/ipv4/netfilter/nf_nat_proto_udplite.c
4669
2731
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/udp.h> #include <linux/netfilter.h> #include <linux/module.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_protocol.h> static u_int16_t udplite_port_rover; static void udplite_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_ipv4_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udplite_port_rover); } static bool udplite_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); struct udphdr *hdr; unsigned int hdroff = iphdroff + iph->ihl*4; __be32 oldip, newip; __be16 *portptr, newport; if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) return false; iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct udphdr *)(skb->data + hdroff); if (maniptype == NF_NAT_MANIP_SRC) { /* Get rid of src ip and src pt */ oldip = iph->saddr; newip = tuple->src.u3.ip; newport = tuple->src.u.udp.port; portptr = &hdr->source; } else { /* Get rid of dst ip and dst pt */ oldip = iph->daddr; newip = tuple->dst.u3.ip; newport = tuple->dst.u.udp.port; portptr = &hdr->dest; } inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0); if (!hdr->check) hdr->check = CSUM_MANGLED_0; *portptr = newport; return true; } static const struct nf_nat_protocol nf_nat_protocol_udplite = { .protonum = IPPROTO_UDPLITE, .manip_pkt = udplite_manip_pkt, .in_range = 
nf_nat_proto_in_range, .unique_tuple = udplite_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif }; static int __init nf_nat_proto_udplite_init(void) { return nf_nat_protocol_register(&nf_nat_protocol_udplite); } static void __exit nf_nat_proto_udplite_fini(void) { nf_nat_protocol_unregister(&nf_nat_protocol_udplite); } module_init(nf_nat_proto_udplite_init); module_exit(nf_nat_proto_udplite_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("UDP-Lite NAT protocol helper"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
gpl-2.0
TeamWin/android_kernel_lge_msm8974
arch/sparc/kernel/mdesc.c
4669
20876
/* mdesc.c: Sun4V machine description handling. * * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/memblock.h> #include <linux/log2.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/bootmem.h> #include <linux/export.h> #include <asm/cpudata.h> #include <asm/hypervisor.h> #include <asm/mdesc.h> #include <asm/prom.h> #include <asm/uaccess.h> #include <asm/oplib.h> #include <asm/smp.h> /* Unlike the OBP device tree, the machine description is a full-on * DAG. An arbitrary number of ARCs are possible from one * node to other nodes and thus we can't use the OBP device_node * data structure to represent these nodes inside of the kernel. * * Actually, it isn't even a DAG, because there are back pointers * which create cycles in the graph. * * mdesc_hdr and mdesc_elem describe the layout of the data structure * we get from the Hypervisor. */ struct mdesc_hdr { u32 version; /* Transport version */ u32 node_sz; /* node block size */ u32 name_sz; /* name block size */ u32 data_sz; /* data block size */ } __attribute__((aligned(16))); struct mdesc_elem { u8 tag; #define MD_LIST_END 0x00 #define MD_NODE 0x4e #define MD_NODE_END 0x45 #define MD_NOOP 0x20 #define MD_PROP_ARC 0x61 #define MD_PROP_VAL 0x76 #define MD_PROP_STR 0x73 #define MD_PROP_DATA 0x64 u8 name_len; u16 resv; u32 name_offset; union { struct { u32 data_len; u32 data_offset; } data; u64 val; } d; }; struct mdesc_mem_ops { struct mdesc_handle *(*alloc)(unsigned int mdesc_size); void (*free)(struct mdesc_handle *handle); }; struct mdesc_handle { struct list_head list; struct mdesc_mem_ops *mops; void *self_base; atomic_t refcnt; unsigned int handle_size; struct mdesc_hdr mdesc; }; static void mdesc_handle_init(struct mdesc_handle *hp, unsigned int handle_size, void *base) { BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1)); memset(hp, 0, handle_size); 
INIT_LIST_HEAD(&hp->list); hp->self_base = base; atomic_set(&hp->refcnt, 1); hp->handle_size = handle_size; } static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size) { unsigned int handle_size, alloc_size; struct mdesc_handle *hp; unsigned long paddr; handle_size = (sizeof(struct mdesc_handle) - sizeof(struct mdesc_hdr) + mdesc_size); alloc_size = PAGE_ALIGN(handle_size); paddr = memblock_alloc(alloc_size, PAGE_SIZE); hp = NULL; if (paddr) { hp = __va(paddr); mdesc_handle_init(hp, handle_size, hp); } return hp; } static void __init mdesc_memblock_free(struct mdesc_handle *hp) { unsigned int alloc_size; unsigned long start; BUG_ON(atomic_read(&hp->refcnt) != 0); BUG_ON(!list_empty(&hp->list)); alloc_size = PAGE_ALIGN(hp->handle_size); start = __pa(hp); free_bootmem_late(start, alloc_size); } static struct mdesc_mem_ops memblock_mdesc_ops = { .alloc = mdesc_memblock_alloc, .free = mdesc_memblock_free, }; static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) { unsigned int handle_size; void *base; handle_size = (sizeof(struct mdesc_handle) - sizeof(struct mdesc_hdr) + mdesc_size); base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL); if (base) { struct mdesc_handle *hp; unsigned long addr; addr = (unsigned long)base; addr = (addr + 15UL) & ~15UL; hp = (struct mdesc_handle *) addr; mdesc_handle_init(hp, handle_size, base); return hp; } return NULL; } static void mdesc_kfree(struct mdesc_handle *hp) { BUG_ON(atomic_read(&hp->refcnt) != 0); BUG_ON(!list_empty(&hp->list)); kfree(hp->self_base); } static struct mdesc_mem_ops kmalloc_mdesc_memops = { .alloc = mdesc_kmalloc, .free = mdesc_kfree, }; static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size, struct mdesc_mem_ops *mops) { struct mdesc_handle *hp = mops->alloc(mdesc_size); if (hp) hp->mops = mops; return hp; } static void mdesc_free(struct mdesc_handle *hp) { hp->mops->free(hp); } static struct mdesc_handle *cur_mdesc; static LIST_HEAD(mdesc_zombie_list); 
static DEFINE_SPINLOCK(mdesc_lock); struct mdesc_handle *mdesc_grab(void) { struct mdesc_handle *hp; unsigned long flags; spin_lock_irqsave(&mdesc_lock, flags); hp = cur_mdesc; if (hp) atomic_inc(&hp->refcnt); spin_unlock_irqrestore(&mdesc_lock, flags); return hp; } EXPORT_SYMBOL(mdesc_grab); void mdesc_release(struct mdesc_handle *hp) { unsigned long flags; spin_lock_irqsave(&mdesc_lock, flags); if (atomic_dec_and_test(&hp->refcnt)) { list_del_init(&hp->list); hp->mops->free(hp); } spin_unlock_irqrestore(&mdesc_lock, flags); } EXPORT_SYMBOL(mdesc_release); static DEFINE_MUTEX(mdesc_mutex); static struct mdesc_notifier_client *client_list; void mdesc_register_notifier(struct mdesc_notifier_client *client) { u64 node; mutex_lock(&mdesc_mutex); client->next = client_list; client_list = client; mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name) client->add(cur_mdesc, node); mutex_unlock(&mdesc_mutex); } static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node) { const u64 *id; u64 a; id = NULL; mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { u64 target; target = mdesc_arc_target(hp, a); id = mdesc_get_property(hp, target, "cfg-handle", NULL); if (id) break; } return id; } /* Run 'func' on nodes which are in A but not in B. */ static void invoke_on_missing(const char *name, struct mdesc_handle *a, struct mdesc_handle *b, void (*func)(struct mdesc_handle *, u64)) { u64 node; mdesc_for_each_node_by_name(a, node, name) { int found = 0, is_vdc_port = 0; const char *name_prop; const u64 *id; u64 fnode; name_prop = mdesc_get_property(a, node, "name", NULL); if (name_prop && !strcmp(name_prop, "vdc-port")) { is_vdc_port = 1; id = parent_cfg_handle(a, node); } else id = mdesc_get_property(a, node, "id", NULL); if (!id) { printk(KERN_ERR "MD: Cannot find ID for %s node.\n", (name_prop ? 
name_prop : name)); continue; } mdesc_for_each_node_by_name(b, fnode, name) { const u64 *fid; if (is_vdc_port) { name_prop = mdesc_get_property(b, fnode, "name", NULL); if (!name_prop || strcmp(name_prop, "vdc-port")) continue; fid = parent_cfg_handle(b, fnode); if (!fid) { printk(KERN_ERR "MD: Cannot find ID " "for vdc-port node.\n"); continue; } } else fid = mdesc_get_property(b, fnode, "id", NULL); if (*id == *fid) { found = 1; break; } } if (!found) func(a, node); } } static void notify_one(struct mdesc_notifier_client *p, struct mdesc_handle *old_hp, struct mdesc_handle *new_hp) { invoke_on_missing(p->node_name, old_hp, new_hp, p->remove); invoke_on_missing(p->node_name, new_hp, old_hp, p->add); } static void mdesc_notify_clients(struct mdesc_handle *old_hp, struct mdesc_handle *new_hp) { struct mdesc_notifier_client *p = client_list; while (p) { notify_one(p, old_hp, new_hp); p = p->next; } } void mdesc_update(void) { unsigned long len, real_len, status; struct mdesc_handle *hp, *orig_hp; unsigned long flags; mutex_lock(&mdesc_mutex); (void) sun4v_mach_desc(0UL, 0UL, &len); hp = mdesc_alloc(len, &kmalloc_mdesc_memops); if (!hp) { printk(KERN_ERR "MD: mdesc alloc fails\n"); goto out; } status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len); if (status != HV_EOK || real_len > len) { printk(KERN_ERR "MD: mdesc reread fails with %lu\n", status); atomic_dec(&hp->refcnt); mdesc_free(hp); goto out; } spin_lock_irqsave(&mdesc_lock, flags); orig_hp = cur_mdesc; cur_mdesc = hp; spin_unlock_irqrestore(&mdesc_lock, flags); mdesc_notify_clients(orig_hp, hp); spin_lock_irqsave(&mdesc_lock, flags); if (atomic_dec_and_test(&orig_hp->refcnt)) mdesc_free(orig_hp); else list_add(&orig_hp->list, &mdesc_zombie_list); spin_unlock_irqrestore(&mdesc_lock, flags); out: mutex_unlock(&mdesc_mutex); } static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) { return (struct mdesc_elem *) (mdesc + 1); } static void *name_block(struct mdesc_hdr *mdesc) { return ((void *) 
node_block(mdesc)) + mdesc->node_sz; } static void *data_block(struct mdesc_hdr *mdesc) { return ((void *) name_block(mdesc)) + mdesc->name_sz; } u64 mdesc_node_by_name(struct mdesc_handle *hp, u64 from_node, const char *name) { struct mdesc_elem *ep = node_block(&hp->mdesc); const char *names = name_block(&hp->mdesc); u64 last_node = hp->mdesc.node_sz / 16; u64 ret; if (from_node == MDESC_NODE_NULL) { ret = from_node = 0; } else if (from_node >= last_node) { return MDESC_NODE_NULL; } else { ret = ep[from_node].d.val; } while (ret < last_node) { if (ep[ret].tag != MD_NODE) return MDESC_NODE_NULL; if (!strcmp(names + ep[ret].name_offset, name)) break; ret = ep[ret].d.val; } if (ret >= last_node) ret = MDESC_NODE_NULL; return ret; } EXPORT_SYMBOL(mdesc_node_by_name); const void *mdesc_get_property(struct mdesc_handle *hp, u64 node, const char *name, int *lenp) { const char *names = name_block(&hp->mdesc); u64 last_node = hp->mdesc.node_sz / 16; void *data = data_block(&hp->mdesc); struct mdesc_elem *ep; if (node == MDESC_NODE_NULL || node >= last_node) return NULL; ep = node_block(&hp->mdesc) + node; ep++; for (; ep->tag != MD_NODE_END; ep++) { void *val = NULL; int len = 0; switch (ep->tag) { case MD_PROP_VAL: val = &ep->d.val; len = 8; break; case MD_PROP_STR: case MD_PROP_DATA: val = data + ep->d.data.data_offset; len = ep->d.data.data_len; break; default: break; } if (!val) continue; if (!strcmp(names + ep->name_offset, name)) { if (lenp) *lenp = len; return val; } } return NULL; } EXPORT_SYMBOL(mdesc_get_property); u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type) { struct mdesc_elem *ep, *base = node_block(&hp->mdesc); const char *names = name_block(&hp->mdesc); u64 last_node = hp->mdesc.node_sz / 16; if (from == MDESC_NODE_NULL || from >= last_node) return MDESC_NODE_NULL; ep = base + from; ep++; for (; ep->tag != MD_NODE_END; ep++) { if (ep->tag != MD_PROP_ARC) continue; if (strcmp(names + ep->name_offset, arc_type)) continue; return 
ep - base; } return MDESC_NODE_NULL; } EXPORT_SYMBOL(mdesc_next_arc); u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc) { struct mdesc_elem *ep, *base = node_block(&hp->mdesc); ep = base + arc; return ep->d.val; } EXPORT_SYMBOL(mdesc_arc_target); const char *mdesc_node_name(struct mdesc_handle *hp, u64 node) { struct mdesc_elem *ep, *base = node_block(&hp->mdesc); const char *names = name_block(&hp->mdesc); u64 last_node = hp->mdesc.node_sz / 16; if (node == MDESC_NODE_NULL || node >= last_node) return NULL; ep = base + node; if (ep->tag != MD_NODE) return NULL; return names + ep->name_offset; } EXPORT_SYMBOL(mdesc_node_name); static u64 max_cpus = 64; static void __init report_platform_properties(void) { struct mdesc_handle *hp = mdesc_grab(); u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform"); const char *s; const u64 *v; if (pn == MDESC_NODE_NULL) { prom_printf("No platform node in machine-description.\n"); prom_halt(); } s = mdesc_get_property(hp, pn, "banner-name", NULL); printk("PLATFORM: banner-name [%s]\n", s); s = mdesc_get_property(hp, pn, "name", NULL); printk("PLATFORM: name [%s]\n", s); v = mdesc_get_property(hp, pn, "hostid", NULL); if (v) printk("PLATFORM: hostid [%08llx]\n", *v); v = mdesc_get_property(hp, pn, "serial#", NULL); if (v) printk("PLATFORM: serial# [%08llx]\n", *v); v = mdesc_get_property(hp, pn, "stick-frequency", NULL); printk("PLATFORM: stick-frequency [%08llx]\n", *v); v = mdesc_get_property(hp, pn, "mac-address", NULL); if (v) printk("PLATFORM: mac-address [%llx]\n", *v); v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL); if (v) printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v); v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL); if (v) printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v); v = mdesc_get_property(hp, pn, "max-cpus", NULL); if (v) { max_cpus = *v; printk("PLATFORM: max-cpus [%llu]\n", max_cpus); } #ifdef CONFIG_SMP { int max_cpu, i; if (v) { max_cpu = *v; if (max_cpu > 
NR_CPUS) max_cpu = NR_CPUS; } else { max_cpu = NR_CPUS; } for (i = 0; i < max_cpu; i++) set_cpu_possible(i, true); } #endif mdesc_release(hp); } static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) { const u64 *level = mdesc_get_property(hp, mp, "level", NULL); const u64 *size = mdesc_get_property(hp, mp, "size", NULL); const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL); const char *type; int type_len; type = mdesc_get_property(hp, mp, "type", &type_len); switch (*level) { case 1: if (of_find_in_proplist(type, "instn", type_len)) { c->icache_size = *size; c->icache_line_size = *line_size; } else if (of_find_in_proplist(type, "data", type_len)) { c->dcache_size = *size; c->dcache_line_size = *line_size; } break; case 2: c->ecache_size = *size; c->ecache_line_size = *line_size; break; default: break; } if (*level == 1) { u64 a; mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { u64 target = mdesc_arc_target(hp, a); const char *name = mdesc_node_name(hp, target); if (!strcmp(name, "cache")) fill_in_one_cache(c, hp, target); } } } static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) { u64 a; mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { u64 t = mdesc_arc_target(hp, a); const char *name; const u64 *id; name = mdesc_node_name(hp, t); if (!strcmp(name, "cpu")) { id = mdesc_get_property(hp, t, "id", NULL); if (*id < NR_CPUS) cpu_data(*id).core_id = core_id; } else { u64 j; mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) { u64 n = mdesc_arc_target(hp, j); const char *n_name; n_name = mdesc_node_name(hp, n); if (strcmp(n_name, "cpu")) continue; id = mdesc_get_property(hp, n, "id", NULL); if (*id < NR_CPUS) cpu_data(*id).core_id = core_id; } } } } static void __cpuinit set_core_ids(struct mdesc_handle *hp) { int idx; u64 mp; idx = 1; mdesc_for_each_node_by_name(hp, mp, "cache") { const u64 *level; const char *type; int len; level = mdesc_get_property(hp, mp, "level", NULL); if 
(*level != 1) continue; type = mdesc_get_property(hp, mp, "type", &len); if (!of_find_in_proplist(type, "instn", len)) continue; mark_core_ids(hp, mp, idx); idx++; } } static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) { u64 a; mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) { u64 t = mdesc_arc_target(hp, a); const char *name; const u64 *id; name = mdesc_node_name(hp, t); if (strcmp(name, "cpu")) continue; id = mdesc_get_property(hp, t, "id", NULL); if (*id < NR_CPUS) cpu_data(*id).proc_id = proc_id; } } static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) { int idx; u64 mp; idx = 0; mdesc_for_each_node_by_name(hp, mp, exec_unit_name) { const char *type; int len; type = mdesc_get_property(hp, mp, "type", &len); if (!of_find_in_proplist(type, "int", len) && !of_find_in_proplist(type, "integer", len)) continue; mark_proc_ids(hp, mp, idx); idx++; } } static void __cpuinit set_proc_ids(struct mdesc_handle *hp) { __set_proc_ids(hp, "exec_unit"); __set_proc_ids(hp, "exec-unit"); } static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned long def, unsigned long max) { u64 val; if (!p) goto use_default; val = *p; if (!val || val >= 64) goto use_default; if (val > max) val = max; *mask = ((1U << val) * 64U) - 1U; return; use_default: *mask = ((1U << def) * 64U) - 1U; } static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, struct trap_per_cpu *tb) { static int printed; const u64 *val; val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL); get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2)); val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL); get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8); val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL); get_one_mondo_bits(val, &tb->resum_qmask, 6, 7); val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL); get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2); if (!printed++) 
{ pr_info("SUN4V: Mondo queue sizes " "[cpu(%u) dev(%u) r(%u) nr(%u)]\n", tb->cpu_mondo_qmask + 1, tb->dev_mondo_qmask + 1, tb->resum_qmask + 1, tb->nonresum_qmask + 1); } } static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) { struct mdesc_handle *hp = mdesc_grab(); void *ret = NULL; u64 mp; mdesc_for_each_node_by_name(hp, mp, "cpu") { const u64 *id = mdesc_get_property(hp, mp, "id", NULL); int cpuid = *id; #ifdef CONFIG_SMP if (cpuid >= NR_CPUS) { printk(KERN_WARNING "Ignoring CPU %d which is " ">= NR_CPUS (%d)\n", cpuid, NR_CPUS); continue; } if (!cpumask_test_cpu(cpuid, mask)) continue; #endif ret = func(hp, mp, cpuid, arg); if (ret) goto out; } out: mdesc_release(hp); return ret; } static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) { ncpus_probed++; #ifdef CONFIG_SMP set_cpu_present(cpuid, true); #endif return NULL; } void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) { if (tlb_type != hypervisor) return; ncpus_probed = 0; mdesc_iterate_over_cpus(record_one_cpu, NULL, mask); } static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) { const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); struct trap_per_cpu *tb; cpuinfo_sparc *c; u64 a; #ifndef CONFIG_SMP /* On uniprocessor we only want the values for the * real physical cpu the kernel booted onto, however * cpu_data() only has one entry at index 0. 
*/ if (cpuid != real_hard_smp_processor_id()) return NULL; cpuid = 0; #endif c = &cpu_data(cpuid); c->clock_tick = *cfreq; tb = &trap_block[cpuid]; get_mondo_data(hp, mp, tb); mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { u64 j, t = mdesc_arc_target(hp, a); const char *t_name; t_name = mdesc_node_name(hp, t); if (!strcmp(t_name, "cache")) { fill_in_one_cache(c, hp, t); continue; } mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { u64 n = mdesc_arc_target(hp, j); const char *n_name; n_name = mdesc_node_name(hp, n); if (!strcmp(n_name, "cache")) fill_in_one_cache(c, hp, n); } } c->core_id = 0; c->proc_id = -1; return NULL; } void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask) { struct mdesc_handle *hp; mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask); #ifdef CONFIG_SMP sparc64_multi_core = 1; #endif hp = mdesc_grab(); set_core_ids(hp); set_proc_ids(hp); mdesc_release(hp); smp_fill_in_sib_core_maps(); } static ssize_t mdesc_read(struct file *file, char __user *buf, size_t len, loff_t *offp) { struct mdesc_handle *hp = mdesc_grab(); int err; if (!hp) return -ENODEV; err = hp->handle_size; if (len < hp->handle_size) err = -EMSGSIZE; else if (copy_to_user(buf, &hp->mdesc, hp->handle_size)) err = -EFAULT; mdesc_release(hp); return err; } static const struct file_operations mdesc_fops = { .read = mdesc_read, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice mdesc_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "mdesc", .fops = &mdesc_fops, }; static int __init mdesc_misc_init(void) { return misc_register(&mdesc_misc); } __initcall(mdesc_misc_init); void __init sun4v_mdesc_init(void) { struct mdesc_handle *hp; unsigned long len, real_len, status; (void) sun4v_mach_desc(0UL, 0UL, &len); printk("MDESC: Size is %lu bytes.\n", len); hp = mdesc_alloc(len, &memblock_mdesc_ops); if (hp == NULL) { prom_printf("MDESC: alloc of %lu bytes failed.\n", len); prom_halt(); } status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len); if (status != HV_EOK 
|| real_len > len) { prom_printf("sun4v_mach_desc fails, err(%lu), " "len(%lu), real_len(%lu)\n", status, len, real_len); mdesc_free(hp); prom_halt(); } cur_mdesc = hp; report_platform_properties(); }
gpl-2.0
SlimSaber/kernel_sony_msm8974
drivers/rtc/rtc-pxa.c
4925
11478
/* * Real Time Clock interface for XScale PXA27x and PXA3xx * * Copyright (C) 2008 Robert Jarzmik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/rtc.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/slab.h> #include <mach/hardware.h> #define TIMER_FREQ CLOCK_TICK_RATE #define RTC_DEF_DIVIDER (32768 - 1) #define RTC_DEF_TRIM 0 #define MAXFREQ_PERIODIC 1000 /* * PXA Registers and bits definitions */ #define RTSR_PICE (1 << 15) /* Periodic interrupt count enable */ #define RTSR_PIALE (1 << 14) /* Periodic interrupt Alarm enable */ #define RTSR_PIAL (1 << 13) /* Periodic interrupt detected */ #define RTSR_SWALE2 (1 << 11) /* RTC stopwatch alarm2 enable */ #define RTSR_SWAL2 (1 << 10) /* RTC stopwatch alarm2 detected */ #define RTSR_SWALE1 (1 << 9) /* RTC stopwatch alarm1 enable */ #define RTSR_SWAL1 (1 << 8) /* RTC stopwatch alarm1 detected */ #define RTSR_RDALE2 (1 << 7) /* RTC alarm2 enable */ #define RTSR_RDAL2 (1 << 6) /* RTC alarm2 detected */ #define RTSR_RDALE1 (1 << 5) /* RTC alarm1 enable */ #define RTSR_RDAL1 (1 << 4) /* RTC alarm1 detected */ #define RTSR_HZE (1 << 3) /* HZ interrupt enable */ #define RTSR_ALE (1 << 2) /* RTC alarm interrupt enable */ #define RTSR_HZ 
(1 << 1)	/* HZ rising-edge detected — NOTE(review): the #define name for
		 * this value is cut off at this chunk boundary; presumably
		 * RTSR_HZ — confirm against the full file. */
#define RTSR_AL		(1 << 0)	/* RTC alarm detected */
/* All interrupt-status bits in RTSR (cleared by writing them back, see
 * pxa_rtc_irq); masked out before every read-modify-write of the enables. */
#define RTSR_TRIG_MASK	(RTSR_AL | RTSR_HZ | RTSR_RDAL1 | RTSR_RDAL2\
			| RTSR_SWAL1 | RTSR_SWAL2)

/* Field layout of the RYxR (year/month/day) calendar registers. */
#define RYxR_YEAR_S	9
#define RYxR_YEAR_MASK	(0xfff << RYxR_YEAR_S)
#define RYxR_MONTH_S	5
#define RYxR_MONTH_MASK	(0xf << RYxR_MONTH_S)
#define RYxR_DAY_MASK	0x1f
/* Field layout of the RDxR (hour/minute/second) registers. */
#define RDxR_HOUR_S	12
#define RDxR_HOUR_MASK	(0x1f << RDxR_HOUR_S)
#define RDxR_MIN_S	6
#define RDxR_MIN_MASK	(0x3f << RDxR_MIN_S)
#define RDxR_SEC_MASK	0x3f

/* Register offsets from the RTC MMIO base. */
#define RTSR		0x08
#define RTTR		0x0c
#define RDCR		0x10
#define RYCR		0x14
#define RDAR1		0x18
#define RYAR1		0x1c
#define RTCPICR		0x34
#define PIAR		0x38

/* MMIO accessors relative to the mapped register window. */
#define rtc_readl(pxa_rtc, reg)	\
	__raw_readl((pxa_rtc)->base + (reg))
#define rtc_writel(pxa_rtc, reg, value)	\
	__raw_writel((value), (pxa_rtc)->base + (reg))

/* Per-device driver state. */
struct pxa_rtc {
	struct resource		*ress;		/* MMIO resource from the platform device */
	void __iomem		*base;		/* ioremap()ed register window */
	int			irq_1Hz;	/* 1Hz tick interrupt line */
	int			irq_Alrm;	/* alarm interrupt line */
	struct rtc_device	*rtc;
	spinlock_t		lock;		/* Protects this structure */
};

/* Pack a struct rtc_time into the RYxR year/month/day register format. */
static u32 ryxr_calc(struct rtc_time *tm)
{
	return ((tm->tm_year + 1900) << RYxR_YEAR_S)
		| ((tm->tm_mon + 1) << RYxR_MONTH_S)
		| tm->tm_mday;
}

/* Pack a struct rtc_time into the RDxR hour/minute/second register format. */
static u32 rdxr_calc(struct rtc_time *tm)
{
	return (tm->tm_hour << RDxR_HOUR_S) | (tm->tm_min << RDxR_MIN_S)
		| tm->tm_sec;
}

/* Unpack RYCR/RDCR-style register values into a struct rtc_time. */
static void tm_calc(u32 rycr, u32 rdcr, struct rtc_time *tm)
{
	tm->tm_year = ((rycr & RYxR_YEAR_MASK) >> RYxR_YEAR_S) - 1900;
	tm->tm_mon = (((rycr & RYxR_MONTH_MASK) >> RYxR_MONTH_S)) - 1;
	tm->tm_mday = (rycr & RYxR_DAY_MASK);
	tm->tm_hour = (rdcr & RDxR_HOUR_MASK) >> RDxR_HOUR_S;
	tm->tm_min = (rdcr & RDxR_MIN_MASK) >> RDxR_MIN_S;
	tm->tm_sec = rdcr & RDxR_SEC_MASK;
}

/*
 * Clear the enable bits given in @mask.  The pending-status bits
 * (RTSR_TRIG_MASK) are masked out of the write-back so this never
 * acknowledges an interrupt as a side effect.
 */
static void rtsr_clear_bits(struct pxa_rtc *pxa_rtc, u32 mask)
{
	u32 rtsr;

	rtsr = rtc_readl(pxa_rtc, RTSR);
	rtsr &= ~RTSR_TRIG_MASK;
	rtsr &= ~mask;
	rtc_writel(pxa_rtc, RTSR, rtsr);
}

/* Set the enable bits in @mask, likewise preserving pending status bits. */
static void rtsr_set_bits(struct pxa_rtc *pxa_rtc, u32 mask)
{
	u32 rtsr;

	rtsr = rtc_readl(pxa_rtc, RTSR);
	rtsr &= ~RTSR_TRIG_MASK;
	rtsr |= mask;
	rtc_writel(pxa_rtc, RTSR, rtsr);
}

/*
 * Shared handler for both the 1Hz tick and the alarm interrupt:
 * acknowledge the pending sources, report the matching RTC_* events to
 * the RTC core, then restore the interrupt enables.
 */
static irqreturn_t pxa_rtc_irq(int irq, void *dev_id)
{
	struct platform_device *pdev = to_platform_device(dev_id);
	struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);
	u32 rtsr;
	unsigned long events = 0;

	spin_lock(&pxa_rtc->lock);

	/* clear interrupt sources (status bits are cleared by writing
	 * the read value back) */
	rtsr = rtc_readl(pxa_rtc, RTSR);
	rtc_writel(pxa_rtc, RTSR, rtsr);

	/* temporary disable rtc interrupts */
	rtsr_clear_bits(pxa_rtc, RTSR_RDALE1 | RTSR_PIALE | RTSR_HZE);

	/* clear alarm interrupt if it has occurred (one-shot: drop its
	 * enable bit from the value written back below) */
	if (rtsr & RTSR_RDAL1)
		rtsr &= ~RTSR_RDALE1;

	/* update irq data & counter */
	if (rtsr & RTSR_RDAL1)
		events |= RTC_AF | RTC_IRQF;
	if (rtsr & RTSR_HZ)
		events |= RTC_UF | RTC_IRQF;
	if (rtsr & RTSR_PIAL)
		events |= RTC_PF | RTC_IRQF;
	rtc_update_irq(pxa_rtc->rtc, 1, events);

	/* enable back rtc interrupts */
	rtc_writel(pxa_rtc, RTSR, rtsr & ~RTSR_TRIG_MASK);

	spin_unlock(&pxa_rtc->lock);
	return IRQ_HANDLED;
}

/* ->open: grab both interrupt lines; on failure undo the first request. */
static int pxa_rtc_open(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	int ret;

	ret = request_irq(pxa_rtc->irq_1Hz, pxa_rtc_irq, 0, "rtc 1Hz", dev);
	if (ret < 0) {
		dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_1Hz,
			ret);
		goto err_irq_1Hz;
	}
	ret = request_irq(pxa_rtc->irq_Alrm, pxa_rtc_irq, 0, "rtc Alrm", dev);
	if (ret < 0) {
		dev_err(dev, "can't get irq %i, err %d\n", pxa_rtc->irq_Alrm,
			ret);
		goto err_irq_Alrm;
	}
	return 0;

err_irq_Alrm:
	free_irq(pxa_rtc->irq_1Hz, dev);
err_irq_1Hz:
	return ret;
}

/* ->release: mask all RTC interrupts, then release both lines. */
static void pxa_rtc_release(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	spin_lock_irq(&pxa_rtc->lock);
	rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);
	spin_unlock_irq(&pxa_rtc->lock);

	free_irq(pxa_rtc->irq_Alrm, dev);
	free_irq(pxa_rtc->irq_1Hz, dev);
}

/* Enable/disable the wristwatch alarm interrupt (RDALE1). */
static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	spin_lock_irq(&pxa_rtc->lock);
	if (enabled)
		rtsr_set_bits(pxa_rtc, RTSR_RDALE1);
	else
		rtsr_clear_bits(pxa_rtc, RTSR_RDALE1);
	spin_unlock_irq(&pxa_rtc->lock);
	return 0;
}

/* Read the current calendar time from RYCR/RDCR. */
static int pxa_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	u32 rycr, rdcr;

	rycr = rtc_readl(pxa_rtc, RYCR);
	rdcr = rtc_readl(pxa_rtc, RDCR);

	tm_calc(rycr, rdcr, tm);
	return 0;
}

/* Program the current calendar time into RYCR/RDCR. */
static int pxa_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	rtc_writel(pxa_rtc, RYCR, ryxr_calc(tm));
	rtc_writel(pxa_rtc, RDCR, rdxr_calc(tm));
	return 0;
}

/* Read back the programmed alarm time plus its enabled/pending state. */
static int pxa_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	u32 rtsr, ryar, rdar;

	ryar = rtc_readl(pxa_rtc, RYAR1);
	rdar = rtc_readl(pxa_rtc, RDAR1);
	tm_calc(ryar, rdar, &alrm->time);

	rtsr = rtc_readl(pxa_rtc, RTSR);
	alrm->enabled = (rtsr & RTSR_RDALE1) ? 1 : 0;
	alrm->pending = (rtsr & RTSR_RDAL1) ? 1 : 0;
	return 0;
}

/* Program the alarm registers and (un)arm the alarm interrupt. */
static int pxa_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
	u32 rtsr;

	spin_lock_irq(&pxa_rtc->lock);

	rtc_writel(pxa_rtc, RYAR1, ryxr_calc(&alrm->time));
	rtc_writel(pxa_rtc, RDAR1, rdxr_calc(&alrm->time));

	rtsr = rtc_readl(pxa_rtc, RTSR);
	if (alrm->enabled)
		rtsr |= RTSR_RDALE1;
	else
		rtsr &= ~RTSR_RDALE1;
	rtc_writel(pxa_rtc, RTSR, rtsr);

	spin_unlock_irq(&pxa_rtc->lock);
	return 0;
}

/* /proc status dump: clock divider/trim and interrupt-enable state. */
static int pxa_rtc_proc(struct device *dev, struct seq_file *seq)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	seq_printf(seq, "trim/divider\t: 0x%08x\n", rtc_readl(pxa_rtc, RTTR));
	seq_printf(seq, "update_IRQ\t: %s\n",
		   (rtc_readl(pxa_rtc, RTSR) & RTSR_HZE) ? "yes" : "no");
	seq_printf(seq, "periodic_IRQ\t: %s\n",
		   (rtc_readl(pxa_rtc, RTSR) & RTSR_PIALE) ? "yes" : "no");
	seq_printf(seq, "periodic_freq\t: %u\n", rtc_readl(pxa_rtc, PIAR));

	return 0;
}

static const struct rtc_class_ops pxa_rtc_ops = {
	.open = pxa_rtc_open,
	.release = pxa_rtc_release,
	.read_time = pxa_rtc_read_time,
	.set_time = pxa_rtc_set_time,
	.read_alarm = pxa_rtc_read_alarm,
	.set_alarm = pxa_rtc_set_alarm,
	.alarm_irq_enable = pxa_alarm_irq_enable,
	.proc = pxa_rtc_proc,
};

/* Probe: map registers, sanity-check the clock divider, register the RTC. */
static int __init pxa_rtc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa_rtc *pxa_rtc;
	int ret;
	u32 rttr;

	pxa_rtc = kzalloc(sizeof(struct pxa_rtc), GFP_KERNEL);
	if (!pxa_rtc)
		return -ENOMEM;

	spin_lock_init(&pxa_rtc->lock);
	platform_set_drvdata(pdev, pxa_rtc);

	ret = -ENXIO;
	pxa_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pxa_rtc->ress) {
		dev_err(dev, "No I/O memory resource defined\n");
		goto err_ress;
	}

	pxa_rtc->irq_1Hz = platform_get_irq(pdev, 0);
	if (pxa_rtc->irq_1Hz < 0) {
		dev_err(dev, "No 1Hz IRQ resource defined\n");
		goto err_ress;
	}
	pxa_rtc->irq_Alrm = platform_get_irq(pdev, 1);
	if (pxa_rtc->irq_Alrm < 0) {
		dev_err(dev, "No alarm IRQ resource defined\n");
		goto err_ress;
	}

	ret = -ENOMEM;
	pxa_rtc->base = ioremap(pxa_rtc->ress->start,
				resource_size(pxa_rtc->ress));
	if (!pxa_rtc->base) {
		dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n");
		goto err_map;
	}

	/*
	 * If the clock divider is uninitialized then reset it to the
	 * default value to get the 1Hz clock.
	 */
	if (rtc_readl(pxa_rtc, RTTR) == 0) {
		rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
		rtc_writel(pxa_rtc, RTTR, rttr);
		dev_warn(dev, "warning: initializing default clock"
			 " divider/trim value\n");
	}

	/* start with every RTC interrupt masked */
	rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_RDALE1 | RTSR_HZE);

	pxa_rtc->rtc = rtc_device_register("pxa-rtc", &pdev->dev, &pxa_rtc_ops,
					   THIS_MODULE);
	ret = PTR_ERR(pxa_rtc->rtc);
	if (IS_ERR(pxa_rtc->rtc)) {
		dev_err(dev, "Failed to register RTC device -> %d\n", ret);
		goto err_rtc_reg;
	}

	device_init_wakeup(dev, 1);

	return 0;

err_rtc_reg:
	iounmap(pxa_rtc->base);
err_ress:
err_map:
	kfree(pxa_rtc);
	return ret;
}

/* Remove: unregister the RTC, unmap and free the driver state. */
static int __exit pxa_rtc_remove(struct platform_device *pdev)
{
	struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev);

	rtc_device_unregister(pxa_rtc->rtc);

	spin_lock_irq(&pxa_rtc->lock);
	iounmap(pxa_rtc->base);
	spin_unlock_irq(&pxa_rtc->lock);

	kfree(pxa_rtc);

	return 0;
}

#ifdef CONFIG_PM
/* Suspend: arm the alarm IRQ as a wakeup source if wakeup is allowed. */
static int pxa_rtc_suspend(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(pxa_rtc->irq_Alrm);
	return 0;
}

/* Resume: undo the wakeup arming done in suspend. */
static int pxa_rtc_resume(struct device *dev)
{
	struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(pxa_rtc->irq_Alrm);
	return 0;
}

static const struct dev_pm_ops pxa_rtc_pm_ops = {
	.suspend	= pxa_rtc_suspend,
	.resume		= pxa_rtc_resume,
};
#endif

static struct platform_driver pxa_rtc_driver = {
	.remove		= __exit_p(pxa_rtc_remove),
	.driver		= {
		.name	= "pxa-rtc",
#ifdef CONFIG_PM
		.pm	= &pxa_rtc_pm_ops,
#endif
	},
};

/* Only PXA27x/PXA3xx have this RTC block; probe once at init time. */
static int __init pxa_rtc_init(void)
{
	if (cpu_is_pxa27x() || cpu_is_pxa3xx())
		return platform_driver_probe(&pxa_rtc_driver, pxa_rtc_probe);

	return -ENODEV;
}

static void __exit pxa_rtc_exit(void)
{
	platform_driver_unregister(&pxa_rtc_driver);
}

module_init(pxa_rtc_init);
module_exit(pxa_rtc_exit);

MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_DESCRIPTION("PXA27x/PXA3xx Realtime Clock Driver (RTC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa-rtc");
gpl-2.0
TeamExodus/kernel_xiaomi_cancro
drivers/net/hamradio/bpqether.c
4925
14882
/* * G8BPQ compatible "AX.25 via ethernet" driver release 004 * * This code REQUIRES 2.0.0 or higher/ NET3.029 * * This module: * This module is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * This is a "pseudo" network driver to allow AX.25 over Ethernet * using G8BPQ encapsulation. It has been extracted from the protocol * implementation because * * - things got unreadable within the protocol stack * - to cure the protocol stack from "feature-ism" * - a protocol implementation shouldn't need to know on * which hardware it is running * - user-level programs like the AX.25 utilities shouldn't * need to know about the hardware. * - IP over ethernet encapsulated AX.25 was impossible * - rxecho.c did not work * - to have room for extensions * - it just deserves to "live" as an own driver * * This driver can use any ethernet destination address, and can be * limited to accept frames from one dedicated ethernet card only. * * Note that the driver sets up the BPQ devices automagically on * startup or (if started before the "insmod" of an ethernet device) * on "ifconfig up". It hopefully will remove the BPQ on "rmmod"ing * the ethernet device (in fact: as soon as another ethernet or bpq * device gets "ifconfig"ured). * * I have heard that several people are thinking of experiments * with highspeed packet radio using existing ethernet cards. * Well, this driver is prepared for this purpose, just add * your tx key control and a txdelay / tailtime algorithm, * probably some buffering, and /voila/... * * History * BPQ 001 Joerg(DL1BKE) Extracted BPQ code from AX.25 * protocol stack and added my own * yet existing patches * BPQ 002 Joerg(DL1BKE) Scan network device list on * startup. 
* BPQ 003 Joerg(DL1BKE) Ethernet destination address * and accepted source address * can be configured by an ioctl() * call. * Fixed to match Linux networking * changes - 2.1.15. * BPQ 004 Joerg(DL1BKE) Fixed to not lock up on ifconfig. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <net/ip.h> #include <net/arp.h> #include <net/net_namespace.h> #include <linux/bpqether.h> static const char banner[] __initdata = KERN_INFO \ "AX.25: bpqether driver version 004\n"; static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF}; static char bpq_eth_addr[6]; static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int bpq_device_event(struct notifier_block *, unsigned long, void *); static struct packet_type bpq_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_BPQ), .func = bpq_rcv, }; static struct notifier_block bpq_dev_notifier = { .notifier_call =bpq_device_event, }; struct bpqdev { struct list_head bpq_list; /* list of bpq devices chain */ struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* bpq device (bpq#) */ char dest_addr[6]; /* ether destination address */ char acpt_addr[6]; /* accept ether frames from this address only */ }; static LIST_HEAD(bpq_devices); /* * bpqether network devices are paired with ethernet devices below them, so * form a special 
"super class" of normal ethernet devices; split their locks * off into a separate class since they always nest. */ static struct lock_class_key bpq_netdev_xmit_lock_key; static struct lock_class_key bpq_netdev_addr_lock_key; static void bpq_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); } static void bpq_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); } /* ------------------------------------------------------------------------ */ /* * Get the ethernet device for a BPQ device */ static inline struct net_device *bpq_get_ether_dev(struct net_device *dev) { struct bpqdev *bpq = netdev_priv(dev); return bpq ? bpq->ethdev : NULL; } /* * Get the BPQ device for the ethernet device */ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) { struct bpqdev *bpq; list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) { if (bpq->ethdev == dev) return bpq->axdev; } return NULL; } static inline int dev_is_ethdev(struct net_device *dev) { return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); } /* ------------------------------------------------------------------------ */ /* * Receive an AX.25 frame via an ethernet interface. */ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { int len; char * ptr; struct ethhdr *eth; struct bpqdev *bpq; if (!net_eq(dev_net(dev), &init_net)) goto drop; if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; if (!pskb_may_pull(skb, sizeof(struct ethhdr))) goto drop; rcu_read_lock(); dev = bpq_get_ax25_dev(dev); if (dev == NULL || !netif_running(dev)) goto drop_unlock; /* * if we want to accept frames from just one ethernet device * we check the source address of the sender. 
*/ bpq = netdev_priv(dev); eth = eth_hdr(skb); if (!(bpq->acpt_addr[0] & 0x01) && memcmp(eth->h_source, bpq->acpt_addr, ETH_ALEN)) goto drop_unlock; if (skb_cow(skb, sizeof(struct ethhdr))) goto drop_unlock; len = skb->data[0] + skb->data[1] * 256 - 5; skb_pull(skb, 2); /* Remove the length bytes */ skb_trim(skb, len); /* Set the length of the data */ dev->stats.rx_packets++; dev->stats.rx_bytes += len; ptr = skb_push(skb, 1); *ptr = 0; skb->protocol = ax25_type_trans(skb, dev); netif_rx(skb); unlock: rcu_read_unlock(); return 0; drop_unlock: kfree_skb(skb); goto unlock; drop: kfree_skb(skb); return 0; } /* * Send an AX.25 frame via an ethernet interface */ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned char *ptr; struct bpqdev *bpq; struct net_device *orig_dev; int size; /* * Just to be *really* sure not to send anything if the interface * is down, the ethernet device may have gone. */ if (!netif_running(dev)) { kfree_skb(skb); return NETDEV_TX_OK; } skb_pull(skb, 1); /* Drop KISS byte */ size = skb->len; /* * We're about to mess with the skb which may still shared with the * generic networking code so unshare and ensure it's got enough * space for the BPQ headers. 
*/ if (skb_cow(skb, AX25_BPQ_HEADER_LEN)) { if (net_ratelimit()) pr_err("bpqether: out of memory\n"); kfree_skb(skb); return NETDEV_TX_OK; } ptr = skb_push(skb, 2); /* Make space for length */ *ptr++ = (size + 5) % 256; *ptr++ = (size + 5) / 256; bpq = netdev_priv(dev); orig_dev = dev; if ((dev = bpq_get_ether_dev(dev)) == NULL) { orig_dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } skb->protocol = ax25_type_trans(skb, dev); skb_reset_network_header(skb); dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); dev->stats.tx_packets++; dev->stats.tx_bytes+=skb->len; dev_queue_xmit(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } /* * Set AX.25 callsign */ static int bpq_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = (struct sockaddr *)addr; memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); return 0; } /* Ioctl commands * * SIOCSBPQETHOPT reserved for enhancements * SIOCSBPQETHADDR set the destination and accepted * source ethernet address (broadcast * or multicast: accept all) */ static int bpq_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct bpq_ethaddr __user *ethaddr = ifr->ifr_data; struct bpqdev *bpq = netdev_priv(dev); struct bpq_req req; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case SIOCSBPQETHOPT: if (copy_from_user(&req, ifr->ifr_data, sizeof(struct bpq_req))) return -EFAULT; switch (req.cmd) { case SIOCGBPQETHPARAM: case SIOCSBPQETHPARAM: default: return -EINVAL; } break; case SIOCSBPQETHADDR: if (copy_from_user(bpq->dest_addr, ethaddr->destination, ETH_ALEN)) return -EFAULT; if (copy_from_user(bpq->acpt_addr, ethaddr->accept, ETH_ALEN)) return -EFAULT; break; default: return -EINVAL; } return 0; } /* * open/close a device */ static int bpq_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static int bpq_close(struct net_device *dev) { netif_stop_queue(dev); return 0; } /* ------------------------------------------------------------------------ */ 
/* * Proc filesystem */ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { int i = 1; struct bpqdev *bpqdev; rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) { if (i == *pos) return bpqdev; } return NULL; } static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct list_head *p; struct bpqdev *bpqdev = v; ++*pos; if (v == SEQ_START_TOKEN) p = rcu_dereference(list_next_rcu(&bpq_devices)); else p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list)); return (p == &bpq_devices) ? NULL : list_entry(p, struct bpqdev, bpq_list); } static void bpq_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int bpq_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "dev ether destination accept from\n"); else { const struct bpqdev *bpqdev = v; seq_printf(seq, "%-5s %-10s %pM ", bpqdev->axdev->name, bpqdev->ethdev->name, bpqdev->dest_addr); if (is_multicast_ether_addr(bpqdev->acpt_addr)) seq_printf(seq, "*\n"); else seq_printf(seq, "%pM\n", bpqdev->acpt_addr); } return 0; } static const struct seq_operations bpq_seqops = { .start = bpq_seq_start, .next = bpq_seq_next, .stop = bpq_seq_stop, .show = bpq_seq_show, }; static int bpq_info_open(struct inode *inode, struct file *file) { return seq_open(file, &bpq_seqops); } static const struct file_operations bpq_info_fops = { .owner = THIS_MODULE, .open = bpq_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* ------------------------------------------------------------------------ */ static const struct net_device_ops bpq_netdev_ops = { .ndo_open = bpq_open, .ndo_stop = bpq_close, .ndo_start_xmit = bpq_xmit, .ndo_set_mac_address = bpq_set_mac_address, .ndo_do_ioctl = bpq_ioctl, }; static void bpq_setup(struct net_device *dev) { dev->netdev_ops = &bpq_netdev_ops; dev->destructor = free_netdev; memcpy(dev->broadcast, &ax25_bcast, 
AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); dev->flags = 0; #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) dev->header_ops = &ax25_header_ops; #endif dev->type = ARPHRD_AX25; dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; dev->mtu = AX25_DEF_PACLEN; dev->addr_len = AX25_ADDR_LEN; } /* * Setup a new device. */ static int bpq_new_device(struct net_device *edev) { int err; struct net_device *ndev; struct bpqdev *bpq; ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d", bpq_setup); if (!ndev) return -ENOMEM; bpq = netdev_priv(ndev); dev_hold(edev); bpq->ethdev = edev; bpq->axdev = ndev; memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr)); memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr)); err = register_netdevice(ndev); if (err) goto error; bpq_set_lockdep_class(ndev); /* List protected by RTNL */ list_add_rcu(&bpq->bpq_list, &bpq_devices); return 0; error: dev_put(edev); free_netdev(ndev); return err; } static void bpq_free_device(struct net_device *ndev) { struct bpqdev *bpq = netdev_priv(ndev); dev_put(bpq->ethdev); list_del_rcu(&bpq->bpq_list); unregister_netdevice(ndev); } /* * Handle device status changes. 
*/ static int bpq_device_event(struct notifier_block *this,unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (!dev_is_ethdev(dev)) return NOTIFY_DONE; switch (event) { case NETDEV_UP: /* new ethernet device -> new BPQ interface */ if (bpq_get_ax25_dev(dev) == NULL) bpq_new_device(dev); break; case NETDEV_DOWN: /* ethernet device closed -> close BPQ interface */ if ((dev = bpq_get_ax25_dev(dev)) != NULL) dev_close(dev); break; case NETDEV_UNREGISTER: /* ethernet device removed -> free BPQ interface */ if ((dev = bpq_get_ax25_dev(dev)) != NULL) bpq_free_device(dev); break; default: break; } return NOTIFY_DONE; } /* ------------------------------------------------------------------------ */ /* * Initialize driver. To be called from af_ax25 if not compiled as a * module */ static int __init bpq_init_driver(void) { #ifdef CONFIG_PROC_FS if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) { printk(KERN_ERR "bpq: cannot create /proc/net/bpqether entry.\n"); return -ENOENT; } #endif /* CONFIG_PROC_FS */ dev_add_pack(&bpq_packet_type); register_netdevice_notifier(&bpq_dev_notifier); printk(banner); return 0; } static void __exit bpq_cleanup_driver(void) { struct bpqdev *bpq; dev_remove_pack(&bpq_packet_type); unregister_netdevice_notifier(&bpq_dev_notifier); proc_net_remove(&init_net, "bpqether"); rtnl_lock(); while (!list_empty(&bpq_devices)) { bpq = list_entry(bpq_devices.next, struct bpqdev, bpq_list); bpq_free_device(bpq->axdev); } rtnl_unlock(); } MODULE_AUTHOR("Joerg Reuter DL1BKE <jreuter@yaina.de>"); MODULE_DESCRIPTION("Transmit and receive AX.25 packets over Ethernet"); MODULE_LICENSE("GPL"); module_init(bpq_init_driver); module_exit(bpq_cleanup_driver);
gpl-2.0
computersforpeace/UBIFS-backports
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
5181
4990
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/mlx4/driver.h>

#include "mlx4_en.h"

/* Ask the firmware to run its hardware health check (0 on success). */
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
	return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

/*
 * Build and transmit one self-addressed test frame for the loopback
 * test: destination MAC is our own address, source is zeroed, payload
 * is an incrementing byte ramp.  Returns mlx4_en_xmit()'s result, or
 * -ENOMEM if the skb cannot be allocated.
 */
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
	struct sk_buff *skb;
	struct ethhdr *ethh;
	unsigned char *packet;
	unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
	unsigned int i;
	int err;

	/* build the pkt before xmit */
	skb = netdev_alloc_skb(priv->dev,
			MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
	if (!skb) {
		en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
		return -ENOMEM;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
	packet = (unsigned char *)skb_put(skb, packet_size);
	memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
	memset(ethh->h_source, 0, ETH_ALEN);
	ethh->h_proto = htons(ETH_P_ARP);
	skb_set_mac_header(skb, 0);
	for (i = 0; i < packet_size; ++i)	/* fill our packet */
		packet[i] = (unsigned char)(i & 0xff);
	/* xmit the pkt */
	err = mlx4_en_xmit(skb, priv->dev);
	return err;
}

/*
 * Loopback self-test: transmit one looped-back packet, then poll for
 * priv->loopback_ok (presumably set by the RX path when it sees the
 * validation frame — not visible in this file).  Returns 0 on success,
 * 1 if the packet never arrived.
 */
static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
	u32 loopback_ok = 0;
	int i;

	priv->loopback_ok = 0;
	priv->validate_loopback = 1;

	/* xmit */
	if (mlx4_en_test_loopback_xmit(priv)) {
		en_err(priv, "Transmitting loopback packet failed\n");
		goto mlx4_en_test_loopback_exit;
	}

	/* polling for result */
	for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
		msleep(MLX4_EN_LOOPBACK_TIMEOUT);
		if (priv->loopback_ok) {
			loopback_ok = 1;
			break;
		}
	}
	if (!loopback_ok)
		en_err(priv, "Loopback packet didn't arrive\n");

mlx4_en_test_loopback_exit:

	priv->validate_loopback = 0;
	return !loopback_ok;
}

/* Link test: 0 if the port reports link up, 1 if down, -ENOMEM if the
 * port query fails. */
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;
	if (priv->port_state.link_state == 1)
		return 0;
	else
		return 1;
}

/* Speed test: 0 when running at 10G, otherwise the (wrong) speed value. */
static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
{
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	/* The device currently only supports 10G speed */
	if (priv->port_state.link_speed != SPEED_10000)
		return priv->port_state.link_speed;
	return 0;
}

/*
 * ethtool self-test entry point.  Result slots: buf[0]=interrupts,
 * buf[1]=link, buf[2]=speed; buf[3]=registers and buf[4]=loopback only
 * run offline with UC loopback support.  Any non-zero slot sets
 * ETH_TEST_FL_FAILED.
 */
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *tx_ring;
	int i, carrier_ok;

	memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);

	if (*flags & ETH_TEST_FL_OFFLINE) {
		/* disable the interface */
		carrier_ok = netif_carrier_ok(dev);

		netif_carrier_off(dev);
retry_tx:
		/* Wait until all tx queues are empty.
		 * there should not be any additional incoming traffic
		 * since we turned the carrier off */
		msleep(200);
		for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
			tx_ring = &priv->tx_ring[i];
			if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
				goto retry_tx;
		}

		if (priv->mdev->dev->caps.flags &
					MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
			buf[3] = mlx4_en_test_registers(priv);
			buf[4] = mlx4_en_test_loopback(priv);
		}

		/* only restore the carrier if it was up to begin with */
		if (carrier_ok)
			netif_carrier_on(dev);

	}
	buf[0] = mlx4_test_interrupts(mdev->dev);
	buf[1] = mlx4_en_test_link(priv);
	buf[2] = mlx4_en_test_speed(priv);

	for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
		if (buf[i])
			*flags |= ETH_TEST_FL_FAILED;
	}
}
gpl-2.0
xcstacy/flo-kernel
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
5181
4990
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/mlx4/driver.h>

#include "mlx4_en.h"

/* Ask the firmware to run its hardware health check (0 on success). */
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
	return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

/*
 * Build and transmit one self-addressed test frame for the loopback
 * test: destination MAC is our own address, source is zeroed, payload
 * is an incrementing byte ramp.  Returns mlx4_en_xmit()'s result, or
 * -ENOMEM if the skb cannot be allocated.
 */
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
	struct sk_buff *skb;
	struct ethhdr *ethh;
	unsigned char *packet;
	unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
	unsigned int i;
	int err;

	/* build the pkt before xmit */
	skb = netdev_alloc_skb(priv->dev,
			MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
	if (!skb) {
		en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
		return -ENOMEM;
	}
	skb_reserve(skb, NET_IP_ALIGN);

	ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
	packet = (unsigned char *)skb_put(skb, packet_size);
	memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
	memset(ethh->h_source, 0, ETH_ALEN);
	ethh->h_proto = htons(ETH_P_ARP);
	skb_set_mac_header(skb, 0);
	for (i = 0; i < packet_size; ++i)	/* fill our packet */
		packet[i] = (unsigned char)(i & 0xff);
	/* xmit the pkt */
	err = mlx4_en_xmit(skb, priv->dev);
	return err;
}

/*
 * Loopback self-test: transmit one looped-back packet, then poll for
 * priv->loopback_ok (presumably set by the RX path when it sees the
 * validation frame — not visible in this file).  Returns 0 on success,
 * 1 if the packet never arrived.
 */
static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
	u32 loopback_ok = 0;
	int i;

	priv->loopback_ok = 0;
	priv->validate_loopback = 1;

	/* xmit */
	if (mlx4_en_test_loopback_xmit(priv)) {
		en_err(priv, "Transmitting loopback packet failed\n");
		goto mlx4_en_test_loopback_exit;
	}

	/* polling for result */
	for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
		msleep(MLX4_EN_LOOPBACK_TIMEOUT);
		if (priv->loopback_ok) {
			loopback_ok = 1;
			break;
		}
	}
	if (!loopback_ok)
		en_err(priv, "Loopback packet didn't arrive\n");

mlx4_en_test_loopback_exit:

	priv->validate_loopback = 0;
	return !loopback_ok;
}

/* Link test: 0 if the port reports link up, 1 if down, -ENOMEM if the
 * port query fails. */
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;
	if (priv->port_state.link_state == 1)
		return 0;
	else
		return 1;
}

/* Speed test: 0 when running at 10G, otherwise the (wrong) speed value. */
static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
{
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	/* The device currently only supports 10G speed */
	if (priv->port_state.link_speed != SPEED_10000)
		return priv->port_state.link_speed;
	return 0;
}

/*
 * ethtool self-test entry point.  Result slots: buf[0]=interrupts,
 * buf[1]=link, buf[2]=speed; buf[3]=registers and buf[4]=loopback only
 * run offline with UC loopback support.  Any non-zero slot sets
 * ETH_TEST_FL_FAILED.
 */
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *tx_ring;
	int i, carrier_ok;

	memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);

	if (*flags & ETH_TEST_FL_OFFLINE) {
		/* disable the interface */
		carrier_ok = netif_carrier_ok(dev);

		netif_carrier_off(dev);
retry_tx:
		/* Wait until all tx queues are empty.
		 * there should not be any additional incoming traffic
		 * since we turned the carrier off */
		msleep(200);
		for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
			tx_ring = &priv->tx_ring[i];
			if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
				goto retry_tx;
		}

		if (priv->mdev->dev->caps.flags &
					MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
			buf[3] = mlx4_en_test_registers(priv);
			buf[4] = mlx4_en_test_loopback(priv);
		}

		/* only restore the carrier if it was up to begin with */
		if (carrier_ok)
			netif_carrier_on(dev);

	}
	buf[0] = mlx4_test_interrupts(mdev->dev);
	buf[1] = mlx4_en_test_link(priv);
	buf[2] = mlx4_en_test_speed(priv);

	for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
		if (buf[i])
			*flags |= ETH_TEST_FL_FAILED;
	}
}
gpl-2.0